diff --git a/examples/algorithms/test_algo/test_algo.py b/examples/algorithms/test_algo/test_algo.py index c44752a8d8..a35449f1a8 100644 --- a/examples/algorithms/test_algo/test_algo.py +++ b/examples/algorithms/test_algo/test_algo.py @@ -3,24 +3,40 @@ import numpy as np import structlog +from pydantic import BaseModel +from ziplime.config.base_algorithm_config import BaseAlgorithmConfig from ziplime.domain.bar_data import BarData from ziplime.finance.execution import MarketOrder from ziplime.trading.trading_algorithm import TradingAlgorithm logger = structlog.get_logger(__name__) + + +class EquityToTrade(BaseModel): + symbol: str + target_percentage: float + + +class AlgorithmConfig(BaseAlgorithmConfig): + currency: str + equities_to_trade: list[EquityToTrade] + + async def initialize(context): context.assets = [ await context.symbol("META"), - await context.symbol("AAPL"), await context.symbol("AMZN"), await context.symbol("NFLX"), await context.symbol("GOOGL") ] + # read config file + logger.info("Algorithm config: ", config=context.algorithm.config) + async def handle_data(context, data): num_assets = len(context.assets) target_percent = 1.0 / num_assets for asset in context.assets: - await context.order_target_percent(asset=asset, target=target_percent, style=MarketOrder()) - + await context.order_target_percent(asset=asset, + target=target_percent, style=MarketOrder()) diff --git a/examples/ingest_assets_data_grpc.py b/examples/ingest_assets_data_grpc.py deleted file mode 100644 index 29fb3bceb7..0000000000 --- a/examples/ingest_assets_data_grpc.py +++ /dev/null @@ -1,19 +0,0 @@ -import asyncio -import logging - -from ziplime.core.ingest_data import get_asset_service, ingest_assets -from ziplime.data.data_sources.grpc.grpc_asset_data_source import GrpcAssetDataSource -from ziplime.utils.logging_utils import configure_logging - - -async def ingest_assets_data_grpc(): - asset_data_source = GrpcAssetDataSource.from_env() - asset_service = get_asset_service( - 
clear_asset_db=True, - ) - await ingest_assets(asset_service=asset_service, asset_data_source=asset_data_source) - - -if __name__ == "__main__": - configure_logging(level=logging.INFO, file_name="mylog.log") - asyncio.run(ingest_assets_data_grpc()) diff --git a/examples/ingest_data_grpc.py b/examples/ingest_data_grpc.py deleted file mode 100644 index d886a45306..0000000000 --- a/examples/ingest_data_grpc.py +++ /dev/null @@ -1,41 +0,0 @@ -import asyncio -import datetime -import logging -import os -import pathlib - -import pytz - -from ziplime.core.ingest_data import get_asset_service, ingest_market_data -from ziplime.data.data_sources.grpc.grpc_data_source import GrpcDataSource -from ziplime.utils.logging_utils import configure_logging - - -async def _ingest_data_grpc(): - asset_service = get_asset_service( - clear_asset_db=False, - # db_path=str(pathlib.Path(__file__).parent.parent.resolve().joinpath("data", "assets.sqlite")) - ) - - symbols = ["META", "AAPL", "AMZN", "NFLX", "GOOGL", "VXX@BATS"] - - start_date = datetime.datetime(year=2025, month=9, day=1, tzinfo=pytz.timezone("America/New_York")) - end_date = datetime.datetime(year=2025, month=9, day=18, tzinfo=pytz.timezone("America/New_York")) - market_data_bundle_source = GrpcDataSource.from_env() - await market_data_bundle_source.get_token() - - await ingest_market_data( - start_date=start_date, - end_date=end_date, - symbols=symbols, - trading_calendar="NYSE", - bundle_name="grpc_daily_data", - data_bundle_source=market_data_bundle_source, - data_frequency=datetime.timedelta(days=1), - asset_service=asset_service - ) - - -if __name__ == "__main__": - configure_logging(level=logging.INFO, file_name="mylog.log") - asyncio.run(_ingest_data_grpc()) diff --git a/examples/ingest_data_limex_hub.py b/examples/ingest_data_limex_hub.py index e76edfde02..fb66687fb0 100644 --- a/examples/ingest_data_limex_hub.py +++ b/examples/ingest_data_limex_hub.py @@ -22,7 +22,7 @@ async def ingest_data_limex_hub(): # STEP 1: 
Define symbols, date range and frequency of the data that we are going to ingest symbols = ["META", "AAPL", "AMZN", "NFLX", "GOOGL"] start_date = datetime.datetime(year=2025, month=1, day=1, tzinfo=datetime.timezone.utc) - end_date = datetime.datetime(year=2025, month=2, day=27, tzinfo=datetime.timezone.utc) + end_date = datetime.datetime(year=2025, month=10, day=27, tzinfo=datetime.timezone.utc) data_frequency = datetime.timedelta(minutes=1) # STEP 2: Initialize market data source and data bundle source - LimexHub market_data_bundle_source = LimexHubDataSource.from_env() diff --git a/examples/run_simulation_daily.py b/examples/run_simulation_daily.py index de796ad679..a752d24a2a 100644 --- a/examples/run_simulation_daily.py +++ b/examples/run_simulation_daily.py @@ -15,10 +15,7 @@ from ziplime.core.ingest_data import get_asset_service from ziplime.core.run_simulation import run_simulation -from ziplime.data.services.bundle_service import BundleService -from ziplime.data.services.file_system_bundle_registry import FileSystemBundleRegistry -from ziplime.finance.commission import PerShare, DEFAULT_PER_SHARE_COST, DEFAULT_MINIMUM_COST_PER_EQUITY_TRADE, \ - PerContract, DEFAULT_PER_CONTRACT_COST, EquityCommissionModel +from ziplime.finance.commission import PerShare, DEFAULT_PER_SHARE_COST, DEFAULT_MINIMUM_COST_PER_EQUITY_TRADE logger = structlog.get_logger(__name__) @@ -55,8 +52,8 @@ async def _run_simulation(): ) custom_data_sources = [] - custom_data_sources.append( - await bundle_service.load_bundle(bundle_name="limex_us_fundamental_data", bundle_version=None)) + # custom_data_sources.append( + # await bundle_service.load_bundle(bundle_name="limex_us_fundamental_data", bundle_version=None)) equity_commission = PerShare( cost=DEFAULT_PER_SHARE_COST, diff --git a/examples/run_simulation_daily_grpc.py b/examples/run_simulation_daily_single_execution.py similarity index 50% rename from examples/run_simulation_daily_grpc.py rename to 
examples/run_simulation_daily_single_execution.py index eb9758c8ab..7752c46055 100644 --- a/examples/run_simulation_daily_grpc.py +++ b/examples/run_simulation_daily_single_execution.py @@ -6,6 +6,9 @@ import polars as pl import structlog +from ziplime.gens.domain.single_execution_clock import SingleExecutionClock +from ziplime.utils.bundle_utils import get_bundle_service +from ziplime.utils.calendar_utils import get_calendar from ziplime.utils.logging_utils import configure_logging from pathlib import Path @@ -14,27 +17,22 @@ from ziplime.core.ingest_data import get_asset_service from ziplime.core.run_simulation import run_simulation -from ziplime.data.services.bundle_service import BundleService -from ziplime.data.services.file_system_bundle_registry import FileSystemBundleRegistry +from ziplime.finance.commission import PerShare, DEFAULT_PER_SHARE_COST, DEFAULT_MINIMUM_COST_PER_EQUITY_TRADE logger = structlog.get_logger(__name__) async def _run_simulation(): - bundle_storage_path = str(Path(Path.home(), ".ziplime", "data")) - bundle_registry = FileSystemBundleRegistry(base_data_path=bundle_storage_path) - bundle_service = BundleService(bundle_registry=bundle_registry) + start_date = datetime.datetime(year=2025, month=1, day=3, tzinfo=pytz.timezone("America/New_York")) + end_date = datetime.datetime(year=2025, month=2, day=1, tzinfo=pytz.timezone("America/New_York")) + emission_rate = datetime.timedelta(days=1) + bundle_service = get_bundle_service() + asset_service = get_asset_service( clear_asset_db=False, - # db_path=str(pathlib.Path(__file__).parent.parent.resolve().joinpath("data", "assets.sqlite")) + db_path=str(pathlib.Path(__file__).parent.parent.resolve().joinpath("data", "assets.sqlite")) ) - symbols = ["META", "AAPL", "AMZN", "NFLX", "GOOGL", "VXX"] - ny = pytz.timezone("America/New_York") - start_local = datetime.datetime(2025, 9, 1, 0, 0) # 2025-09-01 00:00 local clock time - end_local = datetime.datetime(2025, 9, 17, 0, 0) # 2025-09-01 00:00 local 
clock time - - start_date = ny.localize(start_local) # Correct: EDT (UTC-04:00) - end_date = ny.localize(end_local) + # Use aggregations if you ingested data of frequnecy less than 1 day aggregations = [ pl.col("open").first(), pl.col("high").max(), @@ -43,18 +41,33 @@ async def _run_simulation(): pl.col("volume").sum(), pl.col("symbol").last() ] - market_data_bundle = await bundle_service.load_bundle(bundle_name="grpc_daily_data", + market_data_bundle = await bundle_service.load_bundle(bundle_name="limex_us_minute_data", bundle_version=None, frequency=datetime.timedelta(days=1), start_date=start_date, - end_date=end_date + datetime.timedelta(days=1), - symbols=symbols, - aggregations=aggregations + end_date=end_date, + symbols=["META", "AAPL", "AMZN", "NFLX", "GOOGL", + ], + start_auction_delta=datetime.timedelta(minutes=15), + end_auction_delta=datetime.timedelta(minutes=15), + aggregations=aggregations, ) + custom_data_sources = [] + custom_data_sources.append( + await bundle_service.load_bundle(bundle_name="limex_us_fundamental_data", bundle_version=None)) - # By default, SimulationExchange with LIME name is used + equity_commission = PerShare( + cost=DEFAULT_PER_SHARE_COST, + min_trade_cost=DEFAULT_MINIMUM_COST_PER_EQUITY_TRADE, + ) + clock = SingleExecutionClock( + trading_calendar=get_calendar("NYSE"), + start_date=start_date, + end_date=end_date, + emission_rate=emission_rate, + ) # run daily simulation res, errors = await run_simulation( start_date=start_date, @@ -63,15 +76,17 @@ async def _run_simulation(): algorithm_file=str(Path("algorithms/test_algo/test_algo.py").absolute()), total_cash=100000.0, market_data_source=market_data_bundle, - custom_data_sources=[], + custom_data_sources=custom_data_sources, config_file=str(Path("algorithms/test_algo/test_algo_config.json").absolute()), - emission_rate=datetime.timedelta(days=1), - benchmark_asset_symbol="VXX", + emission_rate=emission_rate, + benchmark_asset_symbol="AAPL", benchmark_returns=None, - 
stop_on_error=False, + stop_on_error=True, asset_service=asset_service, - # default_exchange_name="BATS" + equity_commission=equity_commission, + clock=clock ) + if errors: logger.error(errors) print(res.head(n=10).to_markdown()) diff --git a/examples/run_simulation_minute.py b/examples/run_simulation_minute.py index 14f4d83321..71ed429ece 100644 --- a/examples/run_simulation_minute.py +++ b/examples/run_simulation_minute.py @@ -45,6 +45,7 @@ async def _run_simulation(): # run daily simulation res, errors = await run_simulation( + asset_service=asset_service, start_date=start_date, end_date=end_date, trading_calendar="NYSE", diff --git a/examples/run_simulation_minute_custom_csv_data.py b/examples/run_simulation_minute_custom_csv_data.py index 9d76621692..c3212f43f7 100644 --- a/examples/run_simulation_minute_custom_csv_data.py +++ b/examples/run_simulation_minute_custom_csv_data.py @@ -82,6 +82,7 @@ async def _run_simulation(): benchmark_asset_symbol="SPX", benchmark_returns=None, stop_on_error=False, + asset_service=asset_service ) if errors: diff --git a/poetry.lock b/poetry.lock index 7ad3256ff5..e7268b1927 100644 --- a/poetry.lock +++ b/poetry.lock @@ -50,14 +50,14 @@ docs = ["sphinx (==8.1.3)", "sphinx-mdinclude (==0.6.1)"] [[package]] name = "alembic" -version = "1.17.0" +version = "1.18.1" description = "A database migration tool for SQLAlchemy." 
optional = false python-versions = ">=3.10" groups = ["main"] files = [ - {file = "alembic-1.17.0-py3-none-any.whl", hash = "sha256:80523bc437d41b35c5db7e525ad9d908f79de65c27d6a5a5eab6df348a352d99"}, - {file = "alembic-1.17.0.tar.gz", hash = "sha256:4652a0b3e19616b57d652b82bfa5e38bf5dbea0813eed971612671cb9e90c0fe"}, + {file = "alembic-1.18.1-py3-none-any.whl", hash = "sha256:f1c3b0920b87134e851c25f1f7f236d8a332c34b75416802d06971df5d1b7810"}, + {file = "alembic-1.18.1.tar.gz", hash = "sha256:83ac6b81359596816fb3b893099841a0862f2117b2963258e965d70dc62fb866"}, ] [package.dependencies] @@ -82,23 +82,38 @@ files = [ [[package]] name = "anyio" -version = "4.11.0" +version = "4.12.1" description = "High-level concurrency and networking framework on top of asyncio or Trio" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "anyio-4.11.0-py3-none-any.whl", hash = "sha256:0287e96f4d26d4149305414d4e3bc32f0dcd0862365a4bddea19d7a1ec38c4fc"}, - {file = "anyio-4.11.0.tar.gz", hash = "sha256:82a8d0b81e318cc5ce71a5f1f8b5c4e63619620b63141ef8c995fa0db95a57c4"}, + {file = "anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c"}, + {file = "anyio-4.12.1.tar.gz", hash = "sha256:41cfcc3a4c85d3f05c932da7c26d0201ac36f72abd4435ba90d0464a3ffed703"}, ] [package.dependencies] idna = ">=2.8" -sniffio = ">=1.1" typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} [package.extras] -trio = ["trio (>=0.31.0)"] +trio = ["trio (>=0.31.0) ; python_version < \"3.10\"", "trio (>=0.32.0) ; python_version >= \"3.10\""] + +[[package]] +name = "asttokens" +version = "3.0.1" +description = "Annotate AST trees with source code positions" +optional = false +python-versions = ">=3.8" +groups = ["vectorbt"] +files = [ + {file = "asttokens-3.0.1-py3-none-any.whl", hash = "sha256:15a3ebc0f43c2d0a50eeafea25e19046c68398e487b9f1f5b517f7c0f40f976a"}, + {file = "asttokens-3.0.1.tar.gz", hash = 
"sha256:71a4ee5de0bde6a31d64f6b13f2293ac190344478f081c3d1bccfcf5eacb0cb7"}, +] + +[package.extras] +astroid = ["astroid (>=2,<5)"] +test = ["astroid (>=2,<5)", "pytest (<9.0)", "pytest-cov", "pytest-xdist"] [[package]] name = "asyncclick" @@ -151,19 +166,19 @@ dev = ["backports.zoneinfo ; python_version < \"3.9\"", "freezegun (>=1.0,<2.0)" [[package]] name = "backrefs" -version = "5.9" +version = "6.1" description = "A wrapper around re and regex that adds additional back references." optional = false python-versions = ">=3.9" groups = ["docs"] files = [ - {file = "backrefs-5.9-py310-none-any.whl", hash = "sha256:db8e8ba0e9de81fcd635f440deab5ae5f2591b54ac1ebe0550a2ca063488cd9f"}, - {file = "backrefs-5.9-py311-none-any.whl", hash = "sha256:6907635edebbe9b2dc3de3a2befff44d74f30a4562adbb8b36f21252ea19c5cf"}, - {file = "backrefs-5.9-py312-none-any.whl", hash = "sha256:7fdf9771f63e6028d7fee7e0c497c81abda597ea45d6b8f89e8ad76994f5befa"}, - {file = "backrefs-5.9-py313-none-any.whl", hash = "sha256:cc37b19fa219e93ff825ed1fed8879e47b4d89aa7a1884860e2db64ccd7c676b"}, - {file = "backrefs-5.9-py314-none-any.whl", hash = "sha256:df5e169836cc8acb5e440ebae9aad4bf9d15e226d3bad049cf3f6a5c20cc8dc9"}, - {file = "backrefs-5.9-py39-none-any.whl", hash = "sha256:f48ee18f6252b8f5777a22a00a09a85de0ca931658f1dd96d4406a34f3748c60"}, - {file = "backrefs-5.9.tar.gz", hash = "sha256:808548cb708d66b82ee231f962cb36faaf4f2baab032f2fbb783e9c2fdddaa59"}, + {file = "backrefs-6.1-py310-none-any.whl", hash = "sha256:2a2ccb96302337ce61ee4717ceacfbf26ba4efb1d55af86564b8bbaeda39cac1"}, + {file = "backrefs-6.1-py311-none-any.whl", hash = "sha256:e82bba3875ee4430f4de4b6db19429a27275d95a5f3773c57e9e18abc23fd2b7"}, + {file = "backrefs-6.1-py312-none-any.whl", hash = "sha256:c64698c8d2269343d88947c0735cb4b78745bd3ba590e10313fbf3f78c34da5a"}, + {file = "backrefs-6.1-py313-none-any.whl", hash = "sha256:4c9d3dc1e2e558965202c012304f33d4e0e477e1c103663fd2c3cc9bb18b0d05"}, + {file = 
"backrefs-6.1-py314-none-any.whl", hash = "sha256:13eafbc9ccd5222e9c1f0bec563e6d2a6d21514962f11e7fc79872fd56cbc853"}, + {file = "backrefs-6.1-py39-none-any.whl", hash = "sha256:a9e99b8a4867852cad177a6430e31b0f6e495d65f8c6c134b68c14c3c95bf4b0"}, + {file = "backrefs-6.1.tar.gz", hash = "sha256:3bba1749aafe1db9b915f00e0dd166cba613b6f788ffd63060ac3485dc9be231"}, ] [package.extras] @@ -171,18 +186,18 @@ extras = ["regex"] [[package]] name = "beautifulsoup4" -version = "4.14.2" +version = "4.14.3" description = "Screen-scraping library" optional = false python-versions = ">=3.7.0" groups = ["main"] files = [ - {file = "beautifulsoup4-4.14.2-py3-none-any.whl", hash = "sha256:5ef6fa3a8cbece8488d66985560f97ed091e22bbc4e9c2338508a9d5de6d4515"}, - {file = "beautifulsoup4-4.14.2.tar.gz", hash = "sha256:2a98ab9f944a11acee9cc848508ec28d9228abfd522ef0fad6a02a72e0ded69e"}, + {file = "beautifulsoup4-4.14.3-py3-none-any.whl", hash = "sha256:0918bfe44902e6ad8d57732ba310582e98da931428d231a5ecb9e7c703a735bb"}, + {file = "beautifulsoup4-4.14.3.tar.gz", hash = "sha256:6292b1c5186d356bba669ef9f7f051757099565ad9ada5dd630bd9de5fa7fb86"}, ] [package.dependencies] -soupsieve = ">1.2" +soupsieve = ">=1.6.1" typing-extensions = ">=4.0.0" [package.extras] @@ -275,14 +290,14 @@ attrs = ">=20" [[package]] name = "certifi" -version = "2025.10.5" +version = "2026.1.4" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.7" -groups = ["main", "docs"] +groups = ["main", "docs", "vectorbt"] files = [ - {file = "certifi-2025.10.5-py3-none-any.whl", hash = "sha256:0f212c2744a9bb6de0c56639a6f68afe01ecd92d91f14ae897c4fe7bbeeef0de"}, - {file = "certifi-2025.10.5.tar.gz", hash = "sha256:47c09d31ccf2acf0be3f701ea53595ee7e0b8fa08801c6624be771df09ae7b43"}, + {file = "certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c"}, + {file = "certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120"}, ] [[package]] @@ -388,7 +403,7 @@ version = "3.4.4" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7" -groups = ["main", "docs"] +groups = ["main", "docs", "vectorbt"] files = [ {file = "charset_normalizer-3.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d"}, {file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4bd5d4137d500351a30687c2d3971758aac9a19208fc110ccb9d7188fbe709e8"}, @@ -507,14 +522,14 @@ files = [ [[package]] name = "click" -version = "8.3.0" +version = "8.3.1" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.10" groups = ["docs"] files = [ - {file = "click-8.3.0-py3-none-any.whl", hash = "sha256:9b9f285302c6e3064f4330c05f05b81945b2a39544279343e6e7c5f27a9baddc"}, - {file = "click-8.3.0.tar.gz", hash = "sha256:e7b8232224eba16f4ebe410c25ced9f7875cb5f3263ffc93cc3e8da705e229c4"}, + {file = "click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6"}, + {file = "click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a"}, ] 
[package.dependencies] @@ -526,31 +541,140 @@ version = "0.4.6" description = "Cross-platform colored terminal text." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["main", "docs"] +groups = ["main", "docs", "vectorbt"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] -markers = {main = "platform_system == \"Windows\""} +markers = {main = "platform_system == \"Windows\"", vectorbt = "sys_platform == \"win32\" or platform_system == \"Windows\""} + +[[package]] +name = "comm" +version = "0.2.3" +description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc." +optional = false +python-versions = ">=3.8" +groups = ["vectorbt"] +files = [ + {file = "comm-0.2.3-py3-none-any.whl", hash = "sha256:c615d91d75f7f04f095b30d1c1711babd43bdc6419c1be9886a85f2f4e489417"}, + {file = "comm-0.2.3.tar.gz", hash = "sha256:2dc8048c10962d55d7ad693be1e7045d891b7ce8d999c97963a5e3e99c055971"}, +] + +[package.extras] +test = ["pytest"] + +[[package]] +name = "contourpy" +version = "1.3.3" +description = "Python library for calculating contours of 2D quadrilateral grids" +optional = false +python-versions = ">=3.11" +groups = ["vectorbt"] +files = [ + {file = "contourpy-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:709a48ef9a690e1343202916450bc48b9e51c049b089c7f79a267b46cffcdaa1"}, + {file = "contourpy-1.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:23416f38bfd74d5d28ab8429cc4d63fa67d5068bd711a85edb1c3fb0c3e2f381"}, + {file = "contourpy-1.3.3-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:929ddf8c4c7f348e4c0a5a3a714b5c8542ffaa8c22954862a46ca1813b667ee7"}, + {file = "contourpy-1.3.3-cp311-cp311-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", 
hash = "sha256:9e999574eddae35f1312c2b4b717b7885d4edd6cb46700e04f7f02db454e67c1"}, + {file = "contourpy-1.3.3-cp311-cp311-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0bf67e0e3f482cb69779dd3061b534eb35ac9b17f163d851e2a547d56dba0a3a"}, + {file = "contourpy-1.3.3-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:51e79c1f7470158e838808d4a996fa9bac72c498e93d8ebe5119bc1e6becb0db"}, + {file = "contourpy-1.3.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:598c3aaece21c503615fd59c92a3598b428b2f01bfb4b8ca9c4edeecc2438620"}, + {file = "contourpy-1.3.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:322ab1c99b008dad206d406bb61d014cf0174df491ae9d9d0fac6a6fda4f977f"}, + {file = "contourpy-1.3.3-cp311-cp311-win32.whl", hash = "sha256:fd907ae12cd483cd83e414b12941c632a969171bf90fc937d0c9f268a31cafff"}, + {file = "contourpy-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:3519428f6be58431c56581f1694ba8e50626f2dd550af225f82fb5f5814d2a42"}, + {file = "contourpy-1.3.3-cp311-cp311-win_arm64.whl", hash = "sha256:15ff10bfada4bf92ec8b31c62bf7c1834c244019b4a33095a68000d7075df470"}, + {file = "contourpy-1.3.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b08a32ea2f8e42cf1d4be3169a98dd4be32bafe4f22b6c4cb4ba810fa9e5d2cb"}, + {file = "contourpy-1.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:556dba8fb6f5d8742f2923fe9457dbdd51e1049c4a43fd3986a0b14a1d815fc6"}, + {file = "contourpy-1.3.3-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:92d9abc807cf7d0e047b95ca5d957cf4792fcd04e920ca70d48add15c1a90ea7"}, + {file = "contourpy-1.3.3-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b2e8faa0ed68cb29af51edd8e24798bb661eac3bd9f65420c1887b6ca89987c8"}, + {file = "contourpy-1.3.3-cp312-cp312-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:626d60935cf668e70a5ce6ff184fd713e9683fb458898e4249b63be9e28286ea"}, + {file = 
"contourpy-1.3.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4d00e655fcef08aba35ec9610536bfe90267d7ab5ba944f7032549c55a146da1"}, + {file = "contourpy-1.3.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:451e71b5a7d597379ef572de31eeb909a87246974d960049a9848c3bc6c41bf7"}, + {file = "contourpy-1.3.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:459c1f020cd59fcfe6650180678a9993932d80d44ccde1fa1868977438f0b411"}, + {file = "contourpy-1.3.3-cp312-cp312-win32.whl", hash = "sha256:023b44101dfe49d7d53932be418477dba359649246075c996866106da069af69"}, + {file = "contourpy-1.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:8153b8bfc11e1e4d75bcb0bff1db232f9e10b274e0929de9d608027e0d34ff8b"}, + {file = "contourpy-1.3.3-cp312-cp312-win_arm64.whl", hash = "sha256:07ce5ed73ecdc4a03ffe3e1b3e3c1166db35ae7584be76f65dbbe28a7791b0cc"}, + {file = "contourpy-1.3.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:177fb367556747a686509d6fef71d221a4b198a3905fe824430e5ea0fda54eb5"}, + {file = "contourpy-1.3.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d002b6f00d73d69333dac9d0b8d5e84d9724ff9ef044fd63c5986e62b7c9e1b1"}, + {file = "contourpy-1.3.3-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:348ac1f5d4f1d66d3322420f01d42e43122f43616e0f194fc1c9f5d830c5b286"}, + {file = "contourpy-1.3.3-cp313-cp313-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:655456777ff65c2c548b7c454af9c6f33f16c8884f11083244b5819cc214f1b5"}, + {file = "contourpy-1.3.3-cp313-cp313-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:644a6853d15b2512d67881586bd03f462c7ab755db95f16f14d7e238f2852c67"}, + {file = "contourpy-1.3.3-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4debd64f124ca62069f313a9cb86656ff087786016d76927ae2cf37846b006c9"}, + {file = "contourpy-1.3.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:a15459b0f4615b00bbd1e91f1b9e19b7e63aea7483d03d804186f278c0af2659"}, + {file = "contourpy-1.3.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ca0fdcd73925568ca027e0b17ab07aad764be4706d0a925b89227e447d9737b7"}, + {file = "contourpy-1.3.3-cp313-cp313-win32.whl", hash = "sha256:b20c7c9a3bf701366556e1b1984ed2d0cedf999903c51311417cf5f591d8c78d"}, + {file = "contourpy-1.3.3-cp313-cp313-win_amd64.whl", hash = "sha256:1cadd8b8969f060ba45ed7c1b714fe69185812ab43bd6b86a9123fe8f99c3263"}, + {file = "contourpy-1.3.3-cp313-cp313-win_arm64.whl", hash = "sha256:fd914713266421b7536de2bfa8181aa8c699432b6763a0ea64195ebe28bff6a9"}, + {file = "contourpy-1.3.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:88df9880d507169449d434c293467418b9f6cbe82edd19284aa0409e7fdb933d"}, + {file = "contourpy-1.3.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d06bb1f751ba5d417047db62bca3c8fde202b8c11fb50742ab3ab962c81e8216"}, + {file = "contourpy-1.3.3-cp313-cp313t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e4e6b05a45525357e382909a4c1600444e2a45b4795163d3b22669285591c1ae"}, + {file = "contourpy-1.3.3-cp313-cp313t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ab3074b48c4e2cf1a960e6bbeb7f04566bf36b1861d5c9d4d8ac04b82e38ba20"}, + {file = "contourpy-1.3.3-cp313-cp313t-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6c3d53c796f8647d6deb1abe867daeb66dcc8a97e8455efa729516b997b8ed99"}, + {file = "contourpy-1.3.3-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:50ed930df7289ff2a8d7afeb9603f8289e5704755c7e5c3bbd929c90c817164b"}, + {file = "contourpy-1.3.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4feffb6537d64b84877da813a5c30f1422ea5739566abf0bd18065ac040e120a"}, + {file = "contourpy-1.3.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2b7e9480ffe2b0cd2e787e4df64270e3a0440d9db8dc823312e2c940c167df7e"}, + {file = "contourpy-1.3.3-cp313-cp313t-win32.whl", hash = 
"sha256:283edd842a01e3dcd435b1c5116798d661378d83d36d337b8dde1d16a5fc9ba3"}, + {file = "contourpy-1.3.3-cp313-cp313t-win_amd64.whl", hash = "sha256:87acf5963fc2b34825e5b6b048f40e3635dd547f590b04d2ab317c2619ef7ae8"}, + {file = "contourpy-1.3.3-cp313-cp313t-win_arm64.whl", hash = "sha256:3c30273eb2a55024ff31ba7d052dde990d7d8e5450f4bbb6e913558b3d6c2301"}, + {file = "contourpy-1.3.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fde6c716d51c04b1c25d0b90364d0be954624a0ee9d60e23e850e8d48353d07a"}, + {file = "contourpy-1.3.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:cbedb772ed74ff5be440fa8eee9bd49f64f6e3fc09436d9c7d8f1c287b121d77"}, + {file = "contourpy-1.3.3-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:22e9b1bd7a9b1d652cd77388465dc358dafcd2e217d35552424aa4f996f524f5"}, + {file = "contourpy-1.3.3-cp314-cp314-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a22738912262aa3e254e4f3cb079a95a67132fc5a063890e224393596902f5a4"}, + {file = "contourpy-1.3.3-cp314-cp314-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:afe5a512f31ee6bd7d0dda52ec9864c984ca3d66664444f2d72e0dc4eb832e36"}, + {file = "contourpy-1.3.3-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f64836de09927cba6f79dcd00fdd7d5329f3fccc633468507079c829ca4db4e3"}, + {file = "contourpy-1.3.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:1fd43c3be4c8e5fd6e4f2baeae35ae18176cf2e5cced681cca908addf1cdd53b"}, + {file = "contourpy-1.3.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:6afc576f7b33cf00996e5c1102dc2a8f7cc89e39c0b55df93a0b78c1bd992b36"}, + {file = "contourpy-1.3.3-cp314-cp314-win32.whl", hash = "sha256:66c8a43a4f7b8df8b71ee1840e4211a3c8d93b214b213f590e18a1beca458f7d"}, + {file = "contourpy-1.3.3-cp314-cp314-win_amd64.whl", hash = "sha256:cf9022ef053f2694e31d630feaacb21ea24224be1c3ad0520b13d844274614fd"}, + {file = "contourpy-1.3.3-cp314-cp314-win_arm64.whl", hash = 
"sha256:95b181891b4c71de4bb404c6621e7e2390745f887f2a026b2d99e92c17892339"}, + {file = "contourpy-1.3.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:33c82d0138c0a062380332c861387650c82e4cf1747aaa6938b9b6516762e772"}, + {file = "contourpy-1.3.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:ea37e7b45949df430fe649e5de8351c423430046a2af20b1c1961cae3afcda77"}, + {file = "contourpy-1.3.3-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d304906ecc71672e9c89e87c4675dc5c2645e1f4269a5063b99b0bb29f232d13"}, + {file = "contourpy-1.3.3-cp314-cp314t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ca658cd1a680a5c9ea96dc61cdbae1e85c8f25849843aa799dfd3cb370ad4fbe"}, + {file = "contourpy-1.3.3-cp314-cp314t-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ab2fd90904c503739a75b7c8c5c01160130ba67944a7b77bbf36ef8054576e7f"}, + {file = "contourpy-1.3.3-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7301b89040075c30e5768810bc96a8e8d78085b47d8be6e4c3f5a0b4ed478a0"}, + {file = "contourpy-1.3.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:2a2a8b627d5cc6b7c41a4beff6c5ad5eb848c88255fda4a8745f7e901b32d8e4"}, + {file = "contourpy-1.3.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:fd6ec6be509c787f1caf6b247f0b1ca598bef13f4ddeaa126b7658215529ba0f"}, + {file = "contourpy-1.3.3-cp314-cp314t-win32.whl", hash = "sha256:e74a9a0f5e3fff48fb5a7f2fd2b9b70a3fe014a67522f79b7cca4c0c7e43c9ae"}, + {file = "contourpy-1.3.3-cp314-cp314t-win_amd64.whl", hash = "sha256:13b68d6a62db8eafaebb8039218921399baf6e47bf85006fd8529f2a08ef33fc"}, + {file = "contourpy-1.3.3-cp314-cp314t-win_arm64.whl", hash = "sha256:b7448cb5a725bb1e35ce88771b86fba35ef418952474492cf7c764059933ff8b"}, + {file = "contourpy-1.3.3-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:cd5dfcaeb10f7b7f9dc8941717c6c2ade08f587be2226222c12b25f0483ed497"}, + {file = 
"contourpy-1.3.3-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:0c1fc238306b35f246d61a1d416a627348b5cf0648648a031e14bb8705fcdfe8"}, + {file = "contourpy-1.3.3-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:70f9aad7de812d6541d29d2bbf8feb22ff7e1c299523db288004e3157ff4674e"}, + {file = "contourpy-1.3.3-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5ed3657edf08512fc3fe81b510e35c2012fbd3081d2e26160f27ca28affec989"}, + {file = "contourpy-1.3.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:3d1a3799d62d45c18bafd41c5fa05120b96a28079f2393af559b843d1a966a77"}, + {file = "contourpy-1.3.3.tar.gz", hash = "sha256:083e12155b210502d0bca491432bb04d56dc3432f95a979b429f2848c3dbe880"}, +] + +[package.dependencies] +numpy = ">=1.25" + +[package.extras] +bokeh = ["bokeh", "selenium"] +docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"] +mypy = ["bokeh", "contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.17.0)", "types-Pillow"] +test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] +test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist", "wurlitzer"] [[package]] name = "curl-cffi" -version = "0.13.0" +version = "0.14.0" description = "libcurl ffi bindings for Python, with impersonation support." 
optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["main"] files = [ - {file = "curl_cffi-0.13.0-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:434cadbe8df2f08b2fc2c16dff2779fb40b984af99c06aa700af898e185bb9db"}, - {file = "curl_cffi-0.13.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:59afa877a9ae09efa04646a7d068eeea48915a95d9add0a29854e7781679fcd7"}, - {file = "curl_cffi-0.13.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d06ed389e45a7ca97b17c275dbedd3d6524560270e675c720e93a2018a766076"}, - {file = "curl_cffi-0.13.0-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b4e0de45ab3b7a835c72bd53640c2347415111b43421b5c7a1a0b18deae2e541"}, - {file = "curl_cffi-0.13.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8eb4083371bbb94e9470d782de235fb5268bf43520de020c9e5e6be8f395443f"}, - {file = "curl_cffi-0.13.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:28911b526e8cd4aa0e5e38401bfe6887e8093907272f1f67ca22e6beb2933a51"}, - {file = "curl_cffi-0.13.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6d433ffcb455ab01dd0d7bde47109083aa38b59863aa183d29c668ae4c96bf8e"}, - {file = "curl_cffi-0.13.0-cp39-abi3-win_amd64.whl", hash = "sha256:66a6b75ce971de9af64f1b6812e275f60b88880577bac47ef1fa19694fa21cd3"}, - {file = "curl_cffi-0.13.0-cp39-abi3-win_arm64.whl", hash = "sha256:d438a3b45244e874794bc4081dc1e356d2bb926dcc7021e5a8fef2e2105ef1d8"}, - {file = "curl_cffi-0.13.0.tar.gz", hash = "sha256:62ecd90a382bd5023750e3606e0aa7cb1a3a8ba41c14270b8e5e149ebf72c5ca"}, + {file = "curl_cffi-0.14.0-cp39-abi3-macosx_14_0_arm64.whl", hash = "sha256:e35e89c6a69872f9749d6d5fda642ed4fc159619329e99d577d0104c9aad5893"}, + {file = "curl_cffi-0.14.0-cp39-abi3-macosx_15_0_x86_64.whl", hash = "sha256:5945478cd28ad7dfb5c54473bcfb6743ee1d66554d57951fdf8fc0e7d8cf4e45"}, + {file = "curl_cffi-0.14.0-cp39-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:c42e8fa3c667db9ccd2e696ee47adcd3cd5b0838d7282f3fc45f6c0ef3cfdfa7"}, + {file = "curl_cffi-0.14.0-cp39-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:060fe2c99c41d3cb7f894de318ddf4b0301b08dca70453d769bd4e74b36b8483"}, + {file = "curl_cffi-0.14.0-cp39-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b158c41a25388690dd0d40b5bc38d1e0f512135f17fdb8029868cbc1993d2e5b"}, + {file = "curl_cffi-0.14.0-cp39-abi3-manylinux_2_28_i686.whl", hash = "sha256:1439fbef3500fb723333c826adf0efb0e2e5065a703fb5eccce637a2250db34a"}, + {file = "curl_cffi-0.14.0-cp39-abi3-manylinux_2_34_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e7176f2c2d22b542e3cf261072a81deb018cfa7688930f95dddef215caddb469"}, + {file = "curl_cffi-0.14.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:03f21ade2d72978c2bb8670e9b6de5260e2755092b02d94b70b906813662998d"}, + {file = "curl_cffi-0.14.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:58ebf02de64ee5c95613209ddacb014c2d2f86298d7080c0a1c12ed876ee0690"}, + {file = "curl_cffi-0.14.0-cp39-abi3-win_amd64.whl", hash = "sha256:6e503f9a103f6ae7acfb3890c843b53ec030785a22ae7682a22cc43afb94123e"}, + {file = "curl_cffi-0.14.0-cp39-abi3-win_arm64.whl", hash = "sha256:2eed50a969201605c863c4c31269dfc3e0da52916086ac54553cfa353022425c"}, + {file = "curl_cffi-0.14.0.tar.gz", hash = "sha256:5ffbc82e59f05008ec08ea432f0e535418823cda44178ee518906a54f27a5f0f"}, ] [package.dependencies] @@ -559,9 +683,25 @@ cffi = ">=1.12.0" [package.extras] build = ["cibuildwheel", "wheel"] -dev = ["charset_normalizer (>=3.3.2,<4.0)", "coverage (>=6.4.1,<7.0)", "cryptography (>=42.0.5,<43.0)", "httpx (==0.23.1)", "mypy (>=1.9.0,<2.0)", "pytest (>=8.1.1,<9.0)", "pytest-asyncio (>=0.23.6,<1.0)", "pytest-trio (>=0.8.0,<1.0)", "ruff (>=0.3.5,<1.0)", "trio (>=0.25.0,<1.0)", "trustme (>=1.1.0,<2.0)", "typing_extensions", "uvicorn (>=0.29.0,<1.0)", "websockets (>=12.0,<13.0)"] +dev = ["charset_normalizer (>=3.3.2,<4.0)", "coverage 
(>=6.4.1,<7.0)", "cryptography (>=42.0.5,<43.0)", "httpx (==0.23.1)", "mypy (>=1.9.0,<2.0)", "pytest (>=8.1.1,<9.0)", "pytest-asyncio (>=0.23.6,<1.0)", "pytest-trio (>=0.8.0,<1.0)", "ruff (>=0.3.5,<1.0)", "trio (>=0.25.0,<1.0)", "trustme (>=1.1.0,<2.0)", "typing_extensions", "uvicorn (>=0.29.0,<1.0)", "websockets (>=14.0)"] extra = ["lxml_html_clean", "markdownify (>=1.1.0)", "readability-lxml (>=0.8.1)"] -test = ["charset_normalizer (>=3.3.2,<4.0)", "cryptography (>=42.0.5,<43.0)", "fastapi (==0.110.0)", "httpx (==0.23.1)", "proxy.py (>=2.4.3,<3.0)", "pytest (>=8.1.1,<9.0)", "pytest-asyncio (>=0.23.6,<1.0)", "pytest-trio (>=0.8.0,<1.0)", "python-multipart (>=0.0.9,<1.0)", "trio (>=0.25.0,<1.0)", "trustme (>=1.1.0,<2.0)", "typing_extensions", "uvicorn (>=0.29.0,<1.0)", "websockets (>=12.0,<13.0)"] +test = ["charset_normalizer (>=3.3.2,<4.0)", "cryptography (>=42.0.5,<43.0)", "fastapi (>=0.110.0,<1.0)", "httpx (==0.23.1)", "proxy.py (>=2.4.3,<3.0)", "pytest (>=8.1.1,<9.0)", "pytest-asyncio (>=0.23.6,<1.0)", "pytest-trio (>=0.8.0,<1.0)", "python-multipart (>=0.0.9,<1.0)", "trio (>=0.25.0,<1.0)", "trustme (>=1.1.0,<2.0)", "typing_extensions", "uvicorn (>=0.29.0,<1.0)", "websockets (>=14.0)"] + +[[package]] +name = "cycler" +version = "0.12.1" +description = "Composable style cycles" +optional = false +python-versions = ">=3.8" +groups = ["vectorbt"] +files = [ + {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, + {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"}, +] + +[package.extras] +docs = ["ipython", "matplotlib", "numpydoc", "sphinx"] +tests = ["pytest", "pytest-cov", "pytest-xdist"] [[package]] name = "cyclic" @@ -575,6 +715,57 @@ files = [ {file = "cyclic-1.0.0.tar.gz", hash = "sha256:ecddd56cb831ee3e6b79f61ecb0ad71caee606c507136867782911aa01c3e5eb"}, ] +[[package]] +name = "dateparser" +version = "1.2.2" +description = 
"Date parsing library designed to parse dates from HTML pages" +optional = false +python-versions = ">=3.8" +groups = ["vectorbt"] +files = [ + {file = "dateparser-1.2.2-py3-none-any.whl", hash = "sha256:5a5d7211a09013499867547023a2a0c91d5a27d15dd4dbcea676ea9fe66f2482"}, + {file = "dateparser-1.2.2.tar.gz", hash = "sha256:986316f17cb8cdc23ea8ce563027c5ef12fc725b6fb1d137c14ca08777c5ecf7"}, +] + +[package.dependencies] +python-dateutil = ">=2.7.0" +pytz = ">=2024.2" +regex = ">=2024.9.11" +tzlocal = ">=0.2" + +[package.extras] +calendars = ["convertdate (>=2.2.1)", "hijridate"] +fasttext = ["fasttext (>=0.9.1)", "numpy (>=1.19.3,<2)"] +langdetect = ["langdetect (>=1.0.0)"] + +[[package]] +name = "decorator" +version = "5.2.1" +description = "Decorators for Humans" +optional = false +python-versions = ">=3.8" +groups = ["vectorbt"] +files = [ + {file = "decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a"}, + {file = "decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360"}, +] + +[[package]] +name = "dill" +version = "0.4.1" +description = "serialize all of Python" +optional = false +python-versions = ">=3.9" +groups = ["vectorbt"] +files = [ + {file = "dill-0.4.1-py3-none-any.whl", hash = "sha256:1e1ce33e978ae97fcfcff5638477032b801c46c7c65cf717f95fbc2248f79a9d"}, + {file = "dill-0.4.1.tar.gz", hash = "sha256:423092df4182177d4d8ba8290c8a5b640c66ab35ec7da59ccfa00f6fa3eea5fa"}, +] + +[package.extras] +graph = ["objgraph (>=1.7.2)"] +profile = ["gprof2dot (>=2022.7.29)"] + [[package]] name = "empyrical-reloaded" version = "0.5.12" @@ -603,14 +794,14 @@ yfinance = ["yfinance (>=0.1.63)"] [[package]] name = "exchange-calendars" -version = "4.11.2" +version = "4.12" description = "Calendars for securities exchanges" optional = false python-versions = "<4,>=3.10" groups = ["main"] files = [ - {file = "exchange_calendars-4.11.2-py3-none-any.whl", hash = 
"sha256:00b8672b567d52ff81ebfb0e9be8b3932f343de98496b65bbda4ee48540a2d36"}, - {file = "exchange_calendars-4.11.2.tar.gz", hash = "sha256:cbdd4930a1357fbaebab613643fc0ad5d3405fa47acc0e27d2328e792e685ea6"}, + {file = "exchange_calendars-4.12-py3-none-any.whl", hash = "sha256:133946b468d00401f7e622fc4e198ec3a26cba7524ec357bb8597be4ddaf584e"}, + {file = "exchange_calendars-4.12.tar.gz", hash = "sha256:7d2049b624013eb67cb06feb4fd29825a8d53c0c03bf8e7f9d856c210f24eb8a"}, ] [package.dependencies] @@ -621,53 +812,193 @@ pyluach = ">=2.3.0" toolz = ">=1.0.0" tzdata = ">=2025.2" +[[package]] +name = "executing" +version = "2.2.1" +description = "Get the currently executing AST node of a frame, and other information" +optional = false +python-versions = ">=3.8" +groups = ["vectorbt"] +files = [ + {file = "executing-2.2.1-py2.py3-none-any.whl", hash = "sha256:760643d3452b4d777d295bb167ccc74c64a81df23fb5e08eff250c425a4b2017"}, + {file = "executing-2.2.1.tar.gz", hash = "sha256:3632cc370565f6648cc328b32435bd120a1e4ebb20c77e3fdde9a13cd1e533c4"}, +] + +[package.extras] +tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich ; python_version >= \"3.11\""] + +[[package]] +name = "fonttools" +version = "4.61.1" +description = "Tools to manipulate font files" +optional = false +python-versions = ">=3.10" +groups = ["vectorbt"] +files = [ + {file = "fonttools-4.61.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c7db70d57e5e1089a274cbb2b1fd635c9a24de809a231b154965d415d6c6d24"}, + {file = "fonttools-4.61.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5fe9fd43882620017add5eabb781ebfbc6998ee49b35bd7f8f79af1f9f99a958"}, + {file = "fonttools-4.61.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d8db08051fc9e7d8bc622f2112511b8107d8f27cd89e2f64ec45e9825e8288da"}, + {file = "fonttools-4.61.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:a76d4cb80f41ba94a6691264be76435e5f72f2cb3cab0b092a6212855f71c2f6"}, + {file = "fonttools-4.61.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a13fc8aeb24bad755eea8f7f9d409438eb94e82cf86b08fe77a03fbc8f6a96b1"}, + {file = "fonttools-4.61.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b846a1fcf8beadeb9ea4f44ec5bdde393e2f1569e17d700bfc49cd69bde75881"}, + {file = "fonttools-4.61.1-cp310-cp310-win32.whl", hash = "sha256:78a7d3ab09dc47ac1a363a493e6112d8cabed7ba7caad5f54dbe2f08676d1b47"}, + {file = "fonttools-4.61.1-cp310-cp310-win_amd64.whl", hash = "sha256:eff1ac3cc66c2ac7cda1e64b4e2f3ffef474b7335f92fc3833fc632d595fcee6"}, + {file = "fonttools-4.61.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c6604b735bb12fef8e0efd5578c9fb5d3d8532d5001ea13a19cddf295673ee09"}, + {file = "fonttools-4.61.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5ce02f38a754f207f2f06557523cd39a06438ba3aafc0639c477ac409fc64e37"}, + {file = "fonttools-4.61.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:77efb033d8d7ff233385f30c62c7c79271c8885d5c9657d967ede124671bbdfb"}, + {file = "fonttools-4.61.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:75c1a6dfac6abd407634420c93864a1e274ebc1c7531346d9254c0d8f6ca00f9"}, + {file = "fonttools-4.61.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0de30bfe7745c0d1ffa2b0b7048fb7123ad0d71107e10ee090fa0b16b9452e87"}, + {file = "fonttools-4.61.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:58b0ee0ab5b1fc9921eccfe11d1435added19d6494dde14e323f25ad2bc30c56"}, + {file = "fonttools-4.61.1-cp311-cp311-win32.whl", hash = "sha256:f79b168428351d11e10c5aeb61a74e1851ec221081299f4cf56036a95431c43a"}, + {file = "fonttools-4.61.1-cp311-cp311-win_amd64.whl", hash = "sha256:fe2efccb324948a11dd09d22136fe2ac8a97d6c1347cf0b58a911dcd529f66b7"}, + {file = "fonttools-4.61.1-cp312-cp312-macosx_10_13_universal2.whl", hash = 
"sha256:f3cb4a569029b9f291f88aafc927dd53683757e640081ca8c412781ea144565e"}, + {file = "fonttools-4.61.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:41a7170d042e8c0024703ed13b71893519a1a6d6e18e933e3ec7507a2c26a4b2"}, + {file = "fonttools-4.61.1-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:10d88e55330e092940584774ee5e8a6971b01fc2f4d3466a1d6c158230880796"}, + {file = "fonttools-4.61.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:15acc09befd16a0fb8a8f62bc147e1a82817542d72184acca9ce6e0aeda9fa6d"}, + {file = "fonttools-4.61.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e6bcdf33aec38d16508ce61fd81838f24c83c90a1d1b8c68982857038673d6b8"}, + {file = "fonttools-4.61.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5fade934607a523614726119164ff621e8c30e8fa1ffffbbd358662056ba69f0"}, + {file = "fonttools-4.61.1-cp312-cp312-win32.whl", hash = "sha256:75da8f28eff26defba42c52986de97b22106cb8f26515b7c22443ebc9c2d3261"}, + {file = "fonttools-4.61.1-cp312-cp312-win_amd64.whl", hash = "sha256:497c31ce314219888c0e2fce5ad9178ca83fe5230b01a5006726cdf3ac9f24d9"}, + {file = "fonttools-4.61.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c56c488ab471628ff3bfa80964372fc13504ece601e0d97a78ee74126b2045c"}, + {file = "fonttools-4.61.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dc492779501fa723b04d0ab1f5be046797fee17d27700476edc7ee9ae535a61e"}, + {file = "fonttools-4.61.1-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:64102ca87e84261419c3747a0d20f396eb024bdbeb04c2bfb37e2891f5fadcb5"}, + {file = "fonttools-4.61.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c1b526c8d3f615a7b1867f38a9410849c8f4aef078535742198e942fba0e9bd"}, + {file = "fonttools-4.61.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:41ed4b5ec103bd306bb68f81dc166e77409e5209443e5773cb4ed837bcc9b0d3"}, + {file = "fonttools-4.61.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b501c862d4901792adaec7c25b1ecc749e2662543f68bb194c42ba18d6eec98d"}, + {file = "fonttools-4.61.1-cp313-cp313-win32.whl", hash = "sha256:4d7092bb38c53bbc78e9255a59158b150bcdc115a1e3b3ce0b5f267dc35dd63c"}, + {file = "fonttools-4.61.1-cp313-cp313-win_amd64.whl", hash = "sha256:21e7c8d76f62ab13c9472ccf74515ca5b9a761d1bde3265152a6dc58700d895b"}, + {file = "fonttools-4.61.1-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:fff4f534200a04b4a36e7ae3cb74493afe807b517a09e99cb4faa89a34ed6ecd"}, + {file = "fonttools-4.61.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:d9203500f7c63545b4ce3799319fe4d9feb1a1b89b28d3cb5abd11b9dd64147e"}, + {file = "fonttools-4.61.1-cp314-cp314-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fa646ecec9528bef693415c79a86e733c70a4965dd938e9a226b0fc64c9d2e6c"}, + {file = "fonttools-4.61.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:11f35ad7805edba3aac1a3710d104592df59f4b957e30108ae0ba6c10b11dd75"}, + {file = "fonttools-4.61.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b931ae8f62db78861b0ff1ac017851764602288575d65b8e8ff1963fed419063"}, + {file = "fonttools-4.61.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b148b56f5de675ee16d45e769e69f87623a4944f7443850bf9a9376e628a89d2"}, + {file = "fonttools-4.61.1-cp314-cp314-win32.whl", hash = "sha256:9b666a475a65f4e839d3d10473fad6d47e0a9db14a2f4a224029c5bfde58ad2c"}, + {file = "fonttools-4.61.1-cp314-cp314-win_amd64.whl", hash = "sha256:4f5686e1fe5fce75d82d93c47a438a25bf0d1319d2843a926f741140b2b16e0c"}, + {file = "fonttools-4.61.1-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:e76ce097e3c57c4bcb67c5aa24a0ecdbd9f74ea9219997a707a4061fbe2707aa"}, + {file = 
"fonttools-4.61.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:9cfef3ab326780c04d6646f68d4b4742aae222e8b8ea1d627c74e38afcbc9d91"}, + {file = "fonttools-4.61.1-cp314-cp314t-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:a75c301f96db737e1c5ed5fd7d77d9c34466de16095a266509e13da09751bd19"}, + {file = "fonttools-4.61.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:91669ccac46bbc1d09e9273546181919064e8df73488ea087dcac3e2968df9ba"}, + {file = "fonttools-4.61.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c33ab3ca9d3ccd581d58e989d67554e42d8d4ded94ab3ade3508455fe70e65f7"}, + {file = "fonttools-4.61.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:664c5a68ec406f6b1547946683008576ef8b38275608e1cee6c061828171c118"}, + {file = "fonttools-4.61.1-cp314-cp314t-win32.whl", hash = "sha256:aed04cabe26f30c1647ef0e8fbb207516fd40fe9472e9439695f5c6998e60ac5"}, + {file = "fonttools-4.61.1-cp314-cp314t-win_amd64.whl", hash = "sha256:2180f14c141d2f0f3da43f3a81bc8aa4684860f6b0e6f9e165a4831f24e6a23b"}, + {file = "fonttools-4.61.1-py3-none-any.whl", hash = "sha256:17d2bf5d541add43822bcf0c43d7d847b160c9bb01d15d5007d84e2217aaa371"}, + {file = "fonttools-4.61.1.tar.gz", hash = "sha256:6675329885c44657f826ef01d9e4fb33b9158e9d93c537d84ad8399539bc6f69"}, +] + +[package.extras] +all = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\"", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.45.0)", "unicodedata2 (>=17.0.0) ; python_version <= \"3.14\"", "xattr ; sys_platform == \"darwin\"", "zopfli (>=0.1.4)"] +graphite = ["lz4 (>=1.7.4.2)"] +interpolatable = ["munkres ; platform_python_implementation == \"PyPy\"", 
"pycairo", "scipy ; platform_python_implementation != \"PyPy\""] +lxml = ["lxml (>=4.0)"] +pathops = ["skia-pathops (>=0.5.0)"] +plot = ["matplotlib"] +repacker = ["uharfbuzz (>=0.45.0)"] +symfont = ["sympy"] +type1 = ["xattr ; sys_platform == \"darwin\""] +unicode = ["unicodedata2 (>=17.0.0) ; python_version <= \"3.14\""] +woff = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "zopfli (>=0.1.4)"] + [[package]] name = "frozendict" -version = "2.4.6" +version = "2.4.7" description = "A simple immutable dictionary" optional = false python-versions = ">=3.6" groups = ["main"] files = [ - {file = "frozendict-2.4.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c3a05c0a50cab96b4bb0ea25aa752efbfceed5ccb24c007612bc63e51299336f"}, - {file = "frozendict-2.4.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f5b94d5b07c00986f9e37a38dd83c13f5fe3bf3f1ccc8e88edea8fe15d6cd88c"}, - {file = "frozendict-2.4.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4c789fd70879ccb6289a603cdebdc4953e7e5dea047d30c1b180529b28257b5"}, - {file = "frozendict-2.4.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da6a10164c8a50b34b9ab508a9420df38f4edf286b9ca7b7df8a91767baecb34"}, - {file = "frozendict-2.4.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9a8a43036754a941601635ea9c788ebd7a7efbed2becba01b54a887b41b175b9"}, - {file = "frozendict-2.4.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c9905dcf7aa659e6a11b8051114c9fa76dfde3a6e50e6dc129d5aece75b449a2"}, - {file = "frozendict-2.4.6-cp310-cp310-win_amd64.whl", hash = "sha256:323f1b674a2cc18f86ab81698e22aba8145d7a755e0ac2cccf142ee2db58620d"}, - {file = "frozendict-2.4.6-cp310-cp310-win_arm64.whl", hash = "sha256:eabd21d8e5db0c58b60d26b4bb9839cac13132e88277e1376970172a85ee04b3"}, - {file = "frozendict-2.4.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = 
"sha256:eddabeb769fab1e122d3a6872982c78179b5bcc909fdc769f3cf1964f55a6d20"}, - {file = "frozendict-2.4.6-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:377a65be0a700188fc21e669c07de60f4f6d35fae8071c292b7df04776a1c27b"}, - {file = "frozendict-2.4.6-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce1e9217b85eec6ba9560d520d5089c82dbb15f977906eb345d81459723dd7e3"}, - {file = "frozendict-2.4.6-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:7291abacf51798d5ffe632771a69c14fb423ab98d63c4ccd1aa382619afe2f89"}, - {file = "frozendict-2.4.6-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:e72fb86e48811957d66ffb3e95580af7b1af1e6fbd760ad63d7bd79b2c9a07f8"}, - {file = "frozendict-2.4.6-cp36-cp36m-win_amd64.whl", hash = "sha256:622301b1c29c4f9bba633667d592a3a2b093cb408ba3ce578b8901ace3931ef3"}, - {file = "frozendict-2.4.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a4e3737cb99ed03200cd303bdcd5514c9f34b29ee48f405c1184141bd68611c9"}, - {file = "frozendict-2.4.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49ffaf09241bc1417daa19362a2241a4aa435f758fd4375c39ce9790443a39cd"}, - {file = "frozendict-2.4.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d69418479bfb834ba75b0e764f058af46ceee3d655deb6a0dd0c0c1a5e82f09"}, - {file = "frozendict-2.4.6-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:c131f10c4d3906866454c4e89b87a7e0027d533cce8f4652aa5255112c4d6677"}, - {file = "frozendict-2.4.6-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:fc67cbb3c96af7a798fab53d52589752c1673027e516b702ab355510ddf6bdff"}, - {file = "frozendict-2.4.6-cp37-cp37m-win_amd64.whl", hash = "sha256:7730f8ebe791d147a1586cbf6a42629351d4597773317002181b66a2da0d509e"}, - {file = "frozendict-2.4.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:807862e14b0e9665042458fde692c4431d660c4219b9bb240817f5b918182222"}, - {file = "frozendict-2.4.6-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:9647c74efe3d845faa666d4853cfeabbaee403b53270cabfc635b321f770e6b8"}, - {file = "frozendict-2.4.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:665fad3f0f815aa41294e561d98dbedba4b483b3968e7e8cab7d728d64b96e33"}, - {file = "frozendict-2.4.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f42e6b75254ea2afe428ad6d095b62f95a7ae6d4f8272f0bd44a25dddd20f67"}, - {file = "frozendict-2.4.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:02331541611f3897f260900a1815b63389654951126e6e65545e529b63c08361"}, - {file = "frozendict-2.4.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:18d50a2598350b89189da9150058191f55057581e40533e470db46c942373acf"}, - {file = "frozendict-2.4.6-cp38-cp38-win_amd64.whl", hash = "sha256:1b4a3f8f6dd51bee74a50995c39b5a606b612847862203dd5483b9cd91b0d36a"}, - {file = "frozendict-2.4.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a76cee5c4be2a5d1ff063188232fffcce05dde6fd5edd6afe7b75b247526490e"}, - {file = "frozendict-2.4.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ba5ef7328706db857a2bdb2c2a17b4cd37c32a19c017cff1bb7eeebc86b0f411"}, - {file = "frozendict-2.4.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:669237c571856be575eca28a69e92a3d18f8490511eff184937283dc6093bd67"}, - {file = "frozendict-2.4.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0aaa11e7c472150efe65adbcd6c17ac0f586896096ab3963775e1c5c58ac0098"}, - {file = "frozendict-2.4.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b8f2829048f29fe115da4a60409be2130e69402e29029339663fac39c90e6e2b"}, - {file = "frozendict-2.4.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:94321e646cc39bebc66954a31edd1847d3a2a3483cf52ff051cd0996e7db07db"}, - {file = "frozendict-2.4.6-cp39-cp39-win_amd64.whl", hash = "sha256:74b6b26c15dddfefddeb89813e455b00ebf78d0a3662b89506b4d55c6445a9f4"}, - {file = "frozendict-2.4.6-cp39-cp39-win_arm64.whl", hash = 
"sha256:7088102345d1606450bd1801a61139bbaa2cb0d805b9b692f8d81918ea835da6"}, - {file = "frozendict-2.4.6-py311-none-any.whl", hash = "sha256:d065db6a44db2e2375c23eac816f1a022feb2fa98cbb50df44a9e83700accbea"}, - {file = "frozendict-2.4.6-py312-none-any.whl", hash = "sha256:49344abe90fb75f0f9fdefe6d4ef6d4894e640fadab71f11009d52ad97f370b9"}, - {file = "frozendict-2.4.6-py313-none-any.whl", hash = "sha256:7134a2bb95d4a16556bb5f2b9736dceb6ea848fa5b6f3f6c2d6dba93b44b4757"}, - {file = "frozendict-2.4.6.tar.gz", hash = "sha256:df7cd16470fbd26fc4969a208efadc46319334eb97def1ddf48919b351192b8e"}, + {file = "frozendict-2.4.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:bd37c087a538944652363cfd77fb7abe8100cc1f48afea0b88b38bf0f469c3d2"}, + {file = "frozendict-2.4.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2b96f224a5431889f04b2bc99c0e9abe285679464273ead83d7d7f2a15907d35"}, + {file = "frozendict-2.4.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5c1781f28c4bbb177644b3cb6d5cf7da59be374b02d91cdde68d1d5ef32e046b"}, + {file = "frozendict-2.4.7-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:8a06f6c3d3b8d487226fdde93f621e04a54faecc5bf5d9b16497b8f9ead0ac3e"}, + {file = "frozendict-2.4.7-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b809d1c861436a75b2b015dbfd94f6154fa4e7cb0a70e389df1d5f6246b21d1e"}, + {file = "frozendict-2.4.7-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:75eefdf257a84ea73d553eb80d0abbff0af4c9df62529e4600fd3f96ff17eeb3"}, + {file = "frozendict-2.4.7-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a4d2b27d8156922c9739dd2ff4f3934716e17cfd1cf6fb61aa17af7d378555e9"}, + {file = "frozendict-2.4.7-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:2ebd953c41408acfb8041ff9e6c3519c09988fb7e007df7ab6b56e229029d788"}, + {file = "frozendict-2.4.7-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4c64d34b802912ee6d107936e970b90750385a1fdfd38d310098b2918ba4cbf2"}, + {file = "frozendict-2.4.7-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:294a7d7d51dd979021a8691b46aedf9bd4a594ce3ed33a4bdf0a712d6929d712"}, + {file = "frozendict-2.4.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f65d1b90e9ddc791ea82ef91a9ae0ab27ef6c0cfa88fadfa0e5ca5a22f8fa22f"}, + {file = "frozendict-2.4.7-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:82d5272d08451bcef6fb6235a0a04cf1816b6b6815cec76be5ace1de17e0c1a4"}, + {file = "frozendict-2.4.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5943c3f683d3f32036f6ca975e920e383d85add1857eee547742de9c1f283716"}, + {file = "frozendict-2.4.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:88c6bea948da03087035bb9ca9625305d70e084aa33f11e17048cb7dda4ca293"}, + {file = "frozendict-2.4.7-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:ffd1a9f9babec9119712e76a39397d8aa0d72ef8c4ccad917c6175d7e7f81b74"}, + {file = "frozendict-2.4.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:0ff6f57854cc8aa8b30947ec005f9246d96e795a78b21441614e85d39b708822"}, + {file = "frozendict-2.4.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d774df483c12d6cba896eb9a1337bbc5ad3f564eb18cfaaee3e95fb4402f2a86"}, + {file = "frozendict-2.4.7-cp310-cp310-win32.whl", hash = "sha256:a10d38fa300f6bef230fae1fdb4bc98706b78c8a3a2f3140fde748469ef3cfe8"}, + {file = "frozendict-2.4.7-cp310-cp310-win_amd64.whl", hash = "sha256:dd518f300e5eb6a8827bee380f2e1a31c01dc0af069b13abdecd4e5769bd8a97"}, + {file = "frozendict-2.4.7-cp310-cp310-win_arm64.whl", hash = "sha256:3842cfc2d69df5b9978f2e881b7678a282dbdd6846b11b5159f910bc633cbe4f"}, + {file = "frozendict-2.4.7-cp36-cp36m-macosx_10_9_x86_64.whl", hash = 
"sha256:735be62d757e1e7e496ccb6401efe82b473faa653e95eec0826cd7819a29a34c"}, + {file = "frozendict-2.4.7-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fff8584e3bbdc5c1713cd016fbf4b88babfffd4e5e89b39020f2a208dd24c900"}, + {file = "frozendict-2.4.7-cp36-cp36m-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:91a06ee46b3e3ef3b237046b914c0c905eab9fdfeac677e9b51473b482e24c28"}, + {file = "frozendict-2.4.7-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fd7ba56cf6340c732ecb78787c4e9600c4bd01372af7313ded21037126d33ec6"}, + {file = "frozendict-2.4.7-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1b4426457757c30ad86b57cdbcc0adaa328399f1ec3d231a0a2ce7447248987"}, + {file = "frozendict-2.4.7-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b22d337c76b765cb7961d4ee47fe29f89e30921eb47bf856b14dc7641f4df3e5"}, + {file = "frozendict-2.4.7-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57134ef5df1dd32229c148c75a7b89245dbdb89966a155d6dfd4bda653e8c7af"}, + {file = "frozendict-2.4.7-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:c89617a784e1c24a31f5aa4809402f8072a26b64ddbc437897f6391ff69b0ee9"}, + {file = "frozendict-2.4.7-cp36-cp36m-musllinux_1_2_armv7l.whl", hash = "sha256:176dd384dfe1d0d79449e05f67764c57c6f0f3095378bf00deb33165d5d2df5b"}, + {file = "frozendict-2.4.7-cp36-cp36m-musllinux_1_2_i686.whl", hash = "sha256:b1a94e8935c69ae30043b465af496f447950f2c03660aee8657074084faae0b3"}, + {file = "frozendict-2.4.7-cp36-cp36m-musllinux_1_2_ppc64le.whl", hash = "sha256:c570649ceccfa5e11ad9351e9009dc484c315a51a56aa02ced07ae97644bb7aa"}, + {file = "frozendict-2.4.7-cp36-cp36m-musllinux_1_2_s390x.whl", hash = "sha256:e0d450c9d444befe2668bf9386ac2945a2f38152248d58f6b3feea63db59ba08"}, + {file = "frozendict-2.4.7-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = 
"sha256:7469912c1a04102457871ff675aebe600dbb7e79a6450a166cc8079b88f6ca79"}, + {file = "frozendict-2.4.7-cp36-cp36m-win32.whl", hash = "sha256:2808bab8e21887a8c106cca5f6f0ab5bda7ee81e159409a10f53d57542ccd99c"}, + {file = "frozendict-2.4.7-cp36-cp36m-win_amd64.whl", hash = "sha256:ca17ac727ffeeba6c46f5a88e0284a7cb1520fb03127645fcdd7041080adf849"}, + {file = "frozendict-2.4.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8ef11dd996208c5a96eab0683f7a17cb4b992948464d2498520efd75a10a2aac"}, + {file = "frozendict-2.4.7-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b960e700dc95faca7dd6919d0dce183ef89bfe01554d323cf5de7331a2e80f83"}, + {file = "frozendict-2.4.7-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:fc43257a06e6117da6a8a0779243b974cdb9205fed82e32eb669f6746c75d27d"}, + {file = "frozendict-2.4.7-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0ece525da7d0aa3eb56c3e479f30612028d545081c15450d67d771a303ee7d4c"}, + {file = "frozendict-2.4.7-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7ddffe7c0b3be414f88185e212758989c65b497315781290eb029e2c1e1fd64e"}, + {file = "frozendict-2.4.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05dd27415f913cd11649009f53d97eb565ce7b76787d7869c4733738c10e8d27"}, + {file = "frozendict-2.4.7-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0664092614d2b9d0aa404731f33ad5459a54fe8dab9d1fd45aa714fa6de4d0ef"}, + {file = "frozendict-2.4.7-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:830d181781bb263c9fa430b81f82c867546f5dcb368e73931c8591f533a04afb"}, + {file = "frozendict-2.4.7-cp37-cp37m-musllinux_1_2_armv7l.whl", hash = "sha256:c93827e0854393cd904b927ceb529afc17776706f5b9e45c7eaf6a40b3fc7b25"}, + {file = "frozendict-2.4.7-cp37-cp37m-musllinux_1_2_i686.whl", hash = 
"sha256:6d30dbba6eb1497c695f3108c2c292807e7a237c67a1b9ff92c04e89969d22d1"}, + {file = "frozendict-2.4.7-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:ec846bde66b75d68518c7b24a0a46d09db0aee5a6aefd2209d9901faf6e9df21"}, + {file = "frozendict-2.4.7-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:1df8e22f7d24172c08434b10911f3971434bb5a59b4d1b0078ae33a623625294"}, + {file = "frozendict-2.4.7-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:39abe54264ae69a0b2e00fabdb5118604f36a5b927d33e7532cd594c5142ebf4"}, + {file = "frozendict-2.4.7-cp37-cp37m-win32.whl", hash = "sha256:d10c2ea7c90ba204cd053167ba214d0cdd00f3184c7b8d117a56d7fd2b0c6553"}, + {file = "frozendict-2.4.7-cp37-cp37m-win_amd64.whl", hash = "sha256:346a53640f15c1640a3503f60ba99df39e4ab174979f10db4304bbb378df5cbd"}, + {file = "frozendict-2.4.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:cc520f3f4af14f456143a534d554175dbc0f0636ffd653e63675cd591862a9d9"}, + {file = "frozendict-2.4.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7fd0d0bd3a79e009dddbf5fedfd927ad495c218cd7b13a112d28a37e2079725c"}, + {file = "frozendict-2.4.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a404857e48d85a517bb5b974d740f8c4fccb25d8df98885f3a2a4d950870b845"}, + {file = "frozendict-2.4.7-cp38-cp38-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f42e2c25d3eee4ea3da88466f38ed0dce8c622a1a9d92572e5ee53b7a6bb9ef1"}, + {file = "frozendict-2.4.7-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a1a083e9ee7a1904e545a6307c7db1dd76200077520fcbf7a98d886f81b57dd7"}, + {file = "frozendict-2.4.7-cp38-cp38-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:f556ea05d9c5f6dae50d57ce6234e4ab1fbf4551dd0d52b4fed6ef537d9f3d3c"}, + {file = "frozendict-2.4.7-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:739ee81e574f33b46f1e6d9312f3ec2c549bdd574a4ebb6bf106775c9d85ca7b"}, 
+ {file = "frozendict-2.4.7-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:48ab42b01952bc11543577de9fe5d9ca7c41b35dda36326a07fb47d84b3d5f22"}, + {file = "frozendict-2.4.7-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:34233deb8d09e798e874a6ac00b054d2e842164d982ebd43eb91b9f0a6a34876"}, + {file = "frozendict-2.4.7-cp38-cp38-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:76bd99f3508cb2ec87976f2e3fe7d92fb373a661cacffb863013d15e4cfaf0eb"}, + {file = "frozendict-2.4.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a265e95e7087f44b88a6d78a63ea95a2ca0eb0a21ab4f76047f4c164a8beb413"}, + {file = "frozendict-2.4.7-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:1662f1b72b4f4a2ffdfdc4981ece275ca11f90244208ac1f1fc2c17fc9c9437a"}, + {file = "frozendict-2.4.7-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:2e5d2c30f4a3fea83a14b0a5722f21c10de5c755ab5637c70de5eb60886d58cd"}, + {file = "frozendict-2.4.7-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:2cf0a665bf2f1ce69d3cd8b6d3574b1d32ae00981a16fa1d255d2da8a2e44b7c"}, + {file = "frozendict-2.4.7-cp38-cp38-musllinux_1_2_riscv64.whl", hash = "sha256:708382875c3cfe91be625dddcba03dee2dfdadbad2c431568a8c7f2f2af0bbee"}, + {file = "frozendict-2.4.7-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:7fe194f37052a8f45a1a8507e36229e28b79f3d21542ae55ea6a18c6a444f625"}, + {file = "frozendict-2.4.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:d8930877a2dd40461968d9238d95c754e51b33ce7d2a45500f88ffeed5cb7202"}, + {file = "frozendict-2.4.7-cp38-cp38-win32.whl", hash = "sha256:6991469a889ee8a108fe5ed1b044447c7b7a07da9067e93c59cbfac8c1d625cf"}, + {file = "frozendict-2.4.7-cp38-cp38-win_amd64.whl", hash = "sha256:ebae8f4a07372acfc3963fc8d68070cdaab70272c3dd836f057ebbe9b7d38643"}, + {file = "frozendict-2.4.7-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:1c521ad3d747aa475e9040e231f5f1847c04423bae5571c010a9d969e6983c40"}, + {file = "frozendict-2.4.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:70e655c3aa5f893807830f549a7275031a181dbebeaf74c461b51adc755d9335"}, + {file = "frozendict-2.4.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:11d35075f979c96f528d74ccbf89322a7ef8211977dd566bc384985ebce689be"}, + {file = "frozendict-2.4.7-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d4d7ec24d3bfcfac3baf4dffd7fcea3fa8474b087ce32696232132064aa062cf"}, + {file = "frozendict-2.4.7-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5694417864875ca959932e3b98e2b7d5d27c75177bf510939d0da583712ddf58"}, + {file = "frozendict-2.4.7-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:57a754671c5746e11140363aa2f4e7a75c8607de6e85a2bf89dcd1daf51885a7"}, + {file = "frozendict-2.4.7-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:313e0e1d8b22b317aa1f7dd48aec8cbb0416ddd625addf7648a69148fcb9ccff"}, + {file = "frozendict-2.4.7-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:176a66094428b9fd66270927b9787e3b8b1c9505ef92723c7b0ef1923dbe3c4a"}, + {file = "frozendict-2.4.7-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de1fff2683d8af01299ec01eb21a24b6097ce92015fc1fbefa977cecf076a3fc"}, + {file = "frozendict-2.4.7-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:115a822ecd754574e11205e0880e9d61258d960863d6fd1b90883aa800f6d3b3"}, + {file = "frozendict-2.4.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:de8d2c98777ba266f5466e211778d4e3bd00635a207c54f6f7511d8613b86dd3"}, + {file = "frozendict-2.4.7-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:1e307be0e1f26cbc9593f6bdad5238a1408a50f39f63c9c39eb93c7de5926767"}, + {file = 
"frozendict-2.4.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:78a55f320ca924545494ce153df02d4349156cd95dc4603c1f0e80c42c889249"}, + {file = "frozendict-2.4.7-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:e89492dfcc4c27a718f8b5a4c8df1a2dec6c689718cccd70cb2ceba69ab8c642"}, + {file = "frozendict-2.4.7-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:1e801d62e35df24be2c6f7f43c114058712efa79a8549c289437754dad0207a3"}, + {file = "frozendict-2.4.7-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:3ed9e2f3547a59f4ef5c233614c6faa6221d33004cb615ae1c07ffc551cfe178"}, + {file = "frozendict-2.4.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ad0448ed5569f0a9b9b010af9fb5b6d9bdc0b4b877a3ddb188396c4742e62284"}, + {file = "frozendict-2.4.7-cp39-cp39-win32.whl", hash = "sha256:eab9ef8a9268042e819de03079b984eb0894f05a7b63c4e5319b1cf1ef362ba7"}, + {file = "frozendict-2.4.7-cp39-cp39-win_amd64.whl", hash = "sha256:8dfe2f4840b043436ee5bdd07b0fa5daecedf086e6957e7df050a56ab6db078d"}, + {file = "frozendict-2.4.7-cp39-cp39-win_arm64.whl", hash = "sha256:cc2085926872a1b26deda4b81b2254d2e5d2cb2c4d7b327abe4c820b7c93f40b"}, + {file = "frozendict-2.4.7-py3-none-any.whl", hash = "sha256:972af65924ea25cf5b4d9326d549e69a9a4918d8a76a9d3a7cd174d98b237550"}, + {file = "frozendict-2.4.7.tar.gz", hash = "sha256:e478fb2a1391a56c8a6e10cc97c4a9002b410ecd1ac28c18d780661762e271bd"}, ] [[package]] @@ -688,98 +1019,62 @@ python-dateutil = ">=2.8.1" [package.extras] dev = ["flake8", "markdown", "twine", "wheel"] -[[package]] -name = "googleapis-common-protos" -version = "1.71.0" -description = "Common protobufs used in Google APIs" -optional = false -python-versions = ">=3.7" -groups = ["main"] -files = [ - {file = "googleapis_common_protos-1.71.0-py3-none-any.whl", hash = "sha256:59034a1d849dc4d18971997a72ac56246570afdd17f9369a0ff68218d50ab78c"}, - {file = "googleapis_common_protos-1.71.0.tar.gz", hash = "sha256:1aec01e574e29da63c80ba9f7bbf1ccfaacf1da877f23609fe236ca7c72a2e2e"}, -] - 
-[package.dependencies] -protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0" - -[package.extras] -grpc = ["grpcio (>=1.44.0,<2.0.0)"] - [[package]] name = "greenlet" -version = "3.2.4" +version = "3.3.0" description = "Lightweight in-process concurrent programming" optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["main"] files = [ - {file = "greenlet-3.2.4-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:8c68325b0d0acf8d91dde4e6f930967dd52a5302cd4062932a6b2e7c2969f47c"}, - {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:94385f101946790ae13da500603491f04a76b6e4c059dab271b3ce2e283b2590"}, - {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f10fd42b5ee276335863712fa3da6608e93f70629c631bf77145021600abc23c"}, - {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c8c9e331e58180d0d83c5b7999255721b725913ff6bc6cf39fa2a45841a4fd4b"}, - {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:58b97143c9cc7b86fc458f215bd0932f1757ce649e05b640fea2e79b54cedb31"}, - {file = "greenlet-3.2.4-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c2ca18a03a8cfb5b25bc1cbe20f3d9a4c80d8c3b13ba3df49ac3961af0b1018d"}, - {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9fe0a28a7b952a21e2c062cd5756d34354117796c6d9215a87f55e38d15402c5"}, - {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8854167e06950ca75b898b104b63cc646573aa5fef1353d4508ecdd1ee76254f"}, - {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f47617f698838ba98f4ff4189aef02e7343952df3a615f847bb575c3feb177a7"}, - {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:af41be48a4f60429d5cad9d22175217805098a9ef7c40bfef44f7669fb9d74d8"}, - {file = "greenlet-3.2.4-cp310-cp310-win_amd64.whl", hash = "sha256:73f49b5368b5359d04e18d15828eecc1806033db5233397748f4ca813ff1056c"}, - {file = "greenlet-3.2.4-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:96378df1de302bc38e99c3a9aa311967b7dc80ced1dcc6f171e99842987882a2"}, - {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1ee8fae0519a337f2329cb78bd7a8e128ec0f881073d43f023c7b8d4831d5246"}, - {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:94abf90142c2a18151632371140b3dba4dee031633fe614cb592dbb6c9e17bc3"}, - {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:4d1378601b85e2e5171b99be8d2dc85f594c79967599328f95c1dc1a40f1c633"}, - {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0db5594dce18db94f7d1650d7489909b57afde4c580806b8d9203b6e79cdc079"}, - {file = "greenlet-3.2.4-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2523e5246274f54fdadbce8494458a2ebdcdbc7b802318466ac5606d3cded1f8"}, - {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1987de92fec508535687fb807a5cea1560f6196285a4cde35c100b8cd632cc52"}, - {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:55e9c5affaa6775e2c6b67659f3a71684de4c549b3dd9afca3bc773533d284fa"}, - {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c9c6de1940a7d828635fbd254d69db79e54619f165ee7ce32fda763a9cb6a58c"}, - {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03c5136e7be905045160b1b9fdca93dd6727b180feeafda6818e6496434ed8c5"}, - {file = "greenlet-3.2.4-cp311-cp311-win_amd64.whl", hash = "sha256:9c40adce87eaa9ddb593ccb0fa6a07caf34015a29bf8d344811665b573138db9"}, - {file = 
"greenlet-3.2.4-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3b67ca49f54cede0186854a008109d6ee71f66bd57bb36abd6d0a0267b540cdd"}, - {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ddf9164e7a5b08e9d22511526865780a576f19ddd00d62f8a665949327fde8bb"}, - {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f28588772bb5fb869a8eb331374ec06f24a83a9c25bfa1f38b6993afe9c1e968"}, - {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5c9320971821a7cb77cfab8d956fa8e39cd07ca44b6070db358ceb7f8797c8c9"}, - {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c60a6d84229b271d44b70fb6e5fa23781abb5d742af7b808ae3f6efd7c9c60f6"}, - {file = "greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b3812d8d0c9579967815af437d96623f45c0f2ae5f04e366de62a12d83a8fb0"}, - {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:abbf57b5a870d30c4675928c37278493044d7c14378350b3aa5d484fa65575f0"}, - {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20fb936b4652b6e307b8f347665e2c615540d4b42b3b4c8a321d8286da7e520f"}, - {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ee7a6ec486883397d70eec05059353b8e83eca9168b9f3f9a361971e77e0bcd0"}, - {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:326d234cbf337c9c3def0676412eb7040a35a768efc92504b947b3e9cfc7543d"}, - {file = "greenlet-3.2.4-cp312-cp312-win_amd64.whl", hash = "sha256:a7d4e128405eea3814a12cc2605e0e6aedb4035bf32697f72deca74de4105e02"}, - {file = "greenlet-3.2.4-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:1a921e542453fe531144e91e1feedf12e07351b1cf6c9e8a3325ea600a715a31"}, - {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = 
"sha256:cd3c8e693bff0fff6ba55f140bf390fa92c994083f838fece0f63be121334945"}, - {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:710638eb93b1fa52823aa91bf75326f9ecdfd5e0466f00789246a5280f4ba0fc"}, - {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c5111ccdc9c88f423426df3fd1811bfc40ed66264d35aa373420a34377efc98a"}, - {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d76383238584e9711e20ebe14db6c88ddcedc1829a9ad31a584389463b5aa504"}, - {file = "greenlet-3.2.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23768528f2911bcd7e475210822ffb5254ed10d71f4028387e5a99b4c6699671"}, - {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:00fadb3fedccc447f517ee0d3fd8fe49eae949e1cd0f6a611818f4f6fb7dc83b"}, - {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d25c5091190f2dc0eaa3f950252122edbbadbb682aa7b1ef2f8af0f8c0afefae"}, - {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6e343822feb58ac4d0a1211bd9399de2b3a04963ddeec21530fc426cc121f19b"}, - {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ca7f6f1f2649b89ce02f6f229d7c19f680a6238af656f61e0115b24857917929"}, - {file = "greenlet-3.2.4-cp313-cp313-win_amd64.whl", hash = "sha256:554b03b6e73aaabec3745364d6239e9e012d64c68ccd0b8430c64ccc14939a8b"}, - {file = "greenlet-3.2.4-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:49a30d5fda2507ae77be16479bdb62a660fa51b1eb4928b524975b3bde77b3c0"}, - {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:299fd615cd8fc86267b47597123e3f43ad79c9d8a22bebdce535e53550763e2f"}, - {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:c17b6b34111ea72fc5a4e4beec9711d2226285f0386ea83477cbb97c30a3f3a5"}, - {file = 
"greenlet-3.2.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b4a1870c51720687af7fa3e7cda6d08d801dae660f75a76f3845b642b4da6ee1"}, - {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:061dc4cf2c34852b052a8620d40f36324554bc192be474b9e9770e8c042fd735"}, - {file = "greenlet-3.2.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:44358b9bf66c8576a9f57a590d5f5d6e72fa4228b763d0e43fee6d3b06d3a337"}, - {file = "greenlet-3.2.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2917bdf657f5859fbf3386b12d68ede4cf1f04c90c3a6bc1f013dd68a22e2269"}, - {file = "greenlet-3.2.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:015d48959d4add5d6c9f6c5210ee3803a830dce46356e3bc326d6776bde54681"}, - {file = "greenlet-3.2.4-cp314-cp314-win_amd64.whl", hash = "sha256:e37ab26028f12dbb0ff65f29a8d3d44a765c61e729647bf2ddfbbed621726f01"}, - {file = "greenlet-3.2.4-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:b6a7c19cf0d2742d0809a4c05975db036fdff50cd294a93632d6a310bf9ac02c"}, - {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:27890167f55d2387576d1f41d9487ef171849ea0359ce1510ca6e06c8bece11d"}, - {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:18d9260df2b5fbf41ae5139e1be4e796d99655f023a636cd0e11e6406cca7d58"}, - {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:671df96c1f23c4a0d4077a325483c1503c96a1b7d9db26592ae770daa41233d4"}, - {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:16458c245a38991aa19676900d48bd1a6f2ce3e16595051a4db9d012154e8433"}, - {file = "greenlet-3.2.4-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c9913f1a30e4526f432991f89ae263459b1c64d1608c0d22a5c79c287b3c70df"}, - {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:b90654e092f928f110e0007f572007c9727b5265f7632c2fa7415b4689351594"}, - {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:81701fd84f26330f0d5f4944d4e92e61afe6319dcd9775e39396e39d7c3e5f98"}, - {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:28a3c6b7cd72a96f61b0e4b2a36f681025b60ae4779cc73c1535eb5f29560b10"}, - {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:52206cd642670b0b320a1fd1cbfd95bca0e043179c1d8a045f2c6109dfe973be"}, - {file = "greenlet-3.2.4-cp39-cp39-win32.whl", hash = "sha256:65458b409c1ed459ea899e939f0e1cdb14f58dbc803f2f93c5eab5694d32671b"}, - {file = "greenlet-3.2.4-cp39-cp39-win_amd64.whl", hash = "sha256:d2e685ade4dafd447ede19c31277a224a239a0a1a4eca4e6390efedf20260cfb"}, - {file = "greenlet-3.2.4.tar.gz", hash = "sha256:0dca0d95ff849f9a364385f36ab49f50065d76964944638be9691e1832e9f86d"}, + {file = "greenlet-3.3.0-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:6f8496d434d5cb2dce025773ba5597f71f5410ae499d5dd9533e0653258cdb3d"}, + {file = "greenlet-3.3.0-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b96dc7eef78fd404e022e165ec55327f935b9b52ff355b067eb4a0267fc1cffb"}, + {file = "greenlet-3.3.0-cp310-cp310-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:73631cd5cccbcfe63e3f9492aaa664d278fda0ce5c3d43aeda8e77317e38efbd"}, + {file = "greenlet-3.3.0-cp310-cp310-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b299a0cb979f5d7197442dccc3aee67fce53500cd88951b7e6c35575701c980b"}, + {file = "greenlet-3.3.0-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7dee147740789a4632cace364816046e43310b59ff8fb79833ab043aefa72fd5"}, + {file = "greenlet-3.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:39b28e339fc3c348427560494e28d8a6f3561c8d2bcf7d706e1c624ed8d822b9"}, + {file = "greenlet-3.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:b3c374782c2935cc63b2a27ba8708471de4ad1abaa862ffdb1ef45a643ddbb7d"}, + {file = "greenlet-3.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:b49e7ed51876b459bd645d83db257f0180e345d3f768a35a85437a24d5a49082"}, + {file = "greenlet-3.3.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e29f3018580e8412d6aaf5641bb7745d38c85228dacf51a73bd4e26ddf2a6a8e"}, + {file = "greenlet-3.3.0-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a687205fb22794e838f947e2194c0566d3812966b41c78709554aa883183fb62"}, + {file = "greenlet-3.3.0-cp311-cp311-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4243050a88ba61842186cb9e63c7dfa677ec146160b0efd73b855a3d9c7fcf32"}, + {file = "greenlet-3.3.0-cp311-cp311-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:670d0f94cd302d81796e37299bcd04b95d62403883b24225c6b5271466612f45"}, + {file = "greenlet-3.3.0-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6cb3a8ec3db4a3b0eb8a3c25436c2d49e3505821802074969db017b87bc6a948"}, + {file = "greenlet-3.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2de5a0b09eab81fc6a382791b995b1ccf2b172a9fec934747a7a23d2ff291794"}, + {file = "greenlet-3.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4449a736606bd30f27f8e1ff4678ee193bc47f6ca810d705981cfffd6ce0d8c5"}, + {file = "greenlet-3.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:7652ee180d16d447a683c04e4c5f6441bae7ba7b17ffd9f6b3aff4605e9e6f71"}, + {file = "greenlet-3.3.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:b01548f6e0b9e9784a2c99c5651e5dc89ffcbe870bc5fb2e5ef864e9cc6b5dcb"}, + {file = "greenlet-3.3.0-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:349345b770dc88f81506c6861d22a6ccd422207829d2c854ae2af8025af303e3"}, + {file = "greenlet-3.3.0-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e8e18ed6995e9e2c0b4ed264d2cf89260ab3ac7e13555b8032b25a74c6d18655"}, + {file = 
"greenlet-3.3.0-cp312-cp312-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c024b1e5696626890038e34f76140ed1daf858e37496d33f2af57f06189e70d7"}, + {file = "greenlet-3.3.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:047ab3df20ede6a57c35c14bf5200fcf04039d50f908270d3f9a7a82064f543b"}, + {file = "greenlet-3.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2d9ad37fc657b1102ec880e637cccf20191581f75c64087a549e66c57e1ceb53"}, + {file = "greenlet-3.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:83cd0e36932e0e7f36a64b732a6f60c2fc2df28c351bae79fbaf4f8092fe7614"}, + {file = "greenlet-3.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a7a34b13d43a6b78abf828a6d0e87d3385680eaf830cd60d20d52f249faabf39"}, + {file = "greenlet-3.3.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:a1e41a81c7e2825822f4e068c48cb2196002362619e2d70b148f20a831c00739"}, + {file = "greenlet-3.3.0-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9f515a47d02da4d30caaa85b69474cec77b7929b2e936ff7fb853d42f4bf8808"}, + {file = "greenlet-3.3.0-cp313-cp313-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7d2d9fd66bfadf230b385fdc90426fcd6eb64db54b40c495b72ac0feb5766c54"}, + {file = "greenlet-3.3.0-cp313-cp313-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30a6e28487a790417d036088b3bcb3f3ac7d8babaa7d0139edbaddebf3af9492"}, + {file = "greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:087ea5e004437321508a8d6f20efc4cfec5e3c30118e1417ea96ed1d93950527"}, + {file = "greenlet-3.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ab97cf74045343f6c60a39913fa59710e4bd26a536ce7ab2397adf8b27e67c39"}, + {file = "greenlet-3.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5375d2e23184629112ca1ea89a53389dddbffcf417dad40125713d88eb5f96e8"}, + {file = "greenlet-3.3.0-cp313-cp313-win_amd64.whl", hash = 
"sha256:9ee1942ea19550094033c35d25d20726e4f1c40d59545815e1128ac58d416d38"}, + {file = "greenlet-3.3.0-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:60c2ef0f578afb3c8d92ea07ad327f9a062547137afe91f38408f08aacab667f"}, + {file = "greenlet-3.3.0-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a5d554d0712ba1de0a6c94c640f7aeba3f85b3a6e1f2899c11c2c0428da9365"}, + {file = "greenlet-3.3.0-cp314-cp314-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3a898b1e9c5f7307ebbde4102908e6cbfcb9ea16284a3abe15cab996bee8b9b3"}, + {file = "greenlet-3.3.0-cp314-cp314-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:dcd2bdbd444ff340e8d6bdf54d2f206ccddbb3ccfdcd3c25bf4afaa7b8f0cf45"}, + {file = "greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5773edda4dc00e173820722711d043799d3adb4f01731f40619e07ea2750b955"}, + {file = "greenlet-3.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ac0549373982b36d5fd5d30beb8a7a33ee541ff98d2b502714a09f1169f31b55"}, + {file = "greenlet-3.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d198d2d977460358c3b3a4dc844f875d1adb33817f0613f663a656f463764ccc"}, + {file = "greenlet-3.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:73f51dd0e0bdb596fb0417e475fa3c5e32d4c83638296e560086b8d7da7c4170"}, + {file = "greenlet-3.3.0-cp314-cp314t-macosx_11_0_universal2.whl", hash = "sha256:d6ed6f85fae6cdfdb9ce04c9bf7a08d666cfcfb914e7d006f44f840b46741931"}, + {file = "greenlet-3.3.0-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d9125050fcf24554e69c4cacb086b87b3b55dc395a8b3ebe6487b045b2614388"}, + {file = "greenlet-3.3.0-cp314-cp314t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:87e63ccfa13c0a0f6234ed0add552af24cc67dd886731f2261e46e241608bee3"}, + {file = "greenlet-3.3.0-cp314-cp314t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:2662433acbca297c9153a4023fe2161c8dcfdcc91f10433171cf7e7d94ba2221"}, + {file = "greenlet-3.3.0-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3c6e9b9c1527a78520357de498b0e709fb9e2f49c3a513afd5a249007261911b"}, + {file = "greenlet-3.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:286d093f95ec98fdd92fcb955003b8a3d054b4e2cab3e2707a5039e7b50520fd"}, + {file = "greenlet-3.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c10513330af5b8ae16f023e8ddbfb486ab355d04467c4679c5cfe4659975dd9"}, + {file = "greenlet-3.3.0.tar.gz", hash = "sha256:a82bb225a4e9e4d653dd2fb7b8b2d36e4fb25bc0165422a11e48b88e9e6f78fb"}, ] [package.extras] @@ -788,95 +1083,21 @@ test = ["objgraph", "psutil", "setuptools"] [[package]] name = "griffe" -version = "1.14.0" +version = "1.15.0" description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." 
optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["docs"] files = [ - {file = "griffe-1.14.0-py3-none-any.whl", hash = "sha256:0e9d52832cccf0f7188cfe585ba962d2674b241c01916d780925df34873bceb0"}, - {file = "griffe-1.14.0.tar.gz", hash = "sha256:9d2a15c1eca966d68e00517de5d69dd1bc5c9f2335ef6c1775362ba5b8651a13"}, + {file = "griffe-1.15.0-py3-none-any.whl", hash = "sha256:6f6762661949411031f5fcda9593f586e6ce8340f0ba88921a0f2ef7a81eb9a3"}, + {file = "griffe-1.15.0.tar.gz", hash = "sha256:7726e3afd6f298fbc3696e67958803e7ac843c1cfe59734b6251a40cdbfb5eea"}, ] [package.dependencies] colorama = ">=0.4" -[[package]] -name = "grpcio" -version = "1.76.0" -description = "HTTP/2-based RPC framework" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "grpcio-1.76.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:65a20de41e85648e00305c1bb09a3598f840422e522277641145a32d42dcefcc"}, - {file = "grpcio-1.76.0-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:40ad3afe81676fd9ec6d9d406eda00933f218038433980aa19d401490e46ecde"}, - {file = "grpcio-1.76.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:035d90bc79eaa4bed83f524331d55e35820725c9fbb00ffa1904d5550ed7ede3"}, - {file = "grpcio-1.76.0-cp310-cp310-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:4215d3a102bd95e2e11b5395c78562967959824156af11fa93d18fdd18050990"}, - {file = "grpcio-1.76.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:49ce47231818806067aea3324d4bf13825b658ad662d3b25fada0bdad9b8a6af"}, - {file = "grpcio-1.76.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8cc3309d8e08fd79089e13ed4819d0af72aa935dd8f435a195fd152796752ff2"}, - {file = "grpcio-1.76.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:971fd5a1d6e62e00d945423a567e42eb1fa678ba89072832185ca836a94daaa6"}, - {file = "grpcio-1.76.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:9d9adda641db7207e800a7f089068f6f645959f2df27e870ee81d44701dd9db3"}, - {file = "grpcio-1.76.0-cp310-cp310-win32.whl", hash = "sha256:063065249d9e7e0782d03d2bca50787f53bd0fb89a67de9a7b521c4a01f1989b"}, - {file = "grpcio-1.76.0-cp310-cp310-win_amd64.whl", hash = "sha256:a6ae758eb08088d36812dd5d9af7a9859c05b1e0f714470ea243694b49278e7b"}, - {file = "grpcio-1.76.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:2e1743fbd7f5fa713a1b0a8ac8ebabf0ec980b5d8809ec358d488e273b9cf02a"}, - {file = "grpcio-1.76.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:a8c2cf1209497cf659a667d7dea88985e834c24b7c3b605e6254cbb5076d985c"}, - {file = "grpcio-1.76.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:08caea849a9d3c71a542827d6df9d5a69067b0a1efbea8a855633ff5d9571465"}, - {file = "grpcio-1.76.0-cp311-cp311-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:f0e34c2079d47ae9f6188211db9e777c619a21d4faba6977774e8fa43b085e48"}, - {file = "grpcio-1.76.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8843114c0cfce61b40ad48df65abcfc00d4dba82eae8718fab5352390848c5da"}, - {file = "grpcio-1.76.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8eddfb4d203a237da6f3cc8a540dad0517d274b5a1e9e636fd8d2c79b5c1d397"}, - {file = "grpcio-1.76.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:32483fe2aab2c3794101c2a159070584e5db11d0aa091b2c0ea9c4fc43d0d749"}, - {file = "grpcio-1.76.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:dcfe41187da8992c5f40aa8c5ec086fa3672834d2be57a32384c08d5a05b4c00"}, - {file = "grpcio-1.76.0-cp311-cp311-win32.whl", hash = "sha256:2107b0c024d1b35f4083f11245c0e23846ae64d02f40b2b226684840260ed054"}, - {file = "grpcio-1.76.0-cp311-cp311-win_amd64.whl", hash = "sha256:522175aba7af9113c48ec10cc471b9b9bd4f6ceb36aeb4544a8e2c80ed9d252d"}, - {file = "grpcio-1.76.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:81fd9652b37b36f16138611c7e884eb82e0cec137c40d3ef7c3f9b3ed00f6ed8"}, - {file = 
"grpcio-1.76.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:04bbe1bfe3a68bbfd4e52402ab7d4eb59d72d02647ae2042204326cf4bbad280"}, - {file = "grpcio-1.76.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d388087771c837cdb6515539f43b9d4bf0b0f23593a24054ac16f7a960be16f4"}, - {file = "grpcio-1.76.0-cp312-cp312-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:9f8f757bebaaea112c00dba718fc0d3260052ce714e25804a03f93f5d1c6cc11"}, - {file = "grpcio-1.76.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:980a846182ce88c4f2f7e2c22c56aefd515daeb36149d1c897f83cf57999e0b6"}, - {file = "grpcio-1.76.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f92f88e6c033db65a5ae3d97905c8fea9c725b63e28d5a75cb73b49bda5024d8"}, - {file = "grpcio-1.76.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4baf3cbe2f0be3289eb68ac8ae771156971848bb8aaff60bad42005539431980"}, - {file = "grpcio-1.76.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:615ba64c208aaceb5ec83bfdce7728b80bfeb8be97562944836a7a0a9647d882"}, - {file = "grpcio-1.76.0-cp312-cp312-win32.whl", hash = "sha256:45d59a649a82df5718fd9527ce775fd66d1af35e6d31abdcdc906a49c6822958"}, - {file = "grpcio-1.76.0-cp312-cp312-win_amd64.whl", hash = "sha256:c088e7a90b6017307f423efbb9d1ba97a22aa2170876223f9709e9d1de0b5347"}, - {file = "grpcio-1.76.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:26ef06c73eb53267c2b319f43e6634c7556ea37672029241a056629af27c10e2"}, - {file = "grpcio-1.76.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:45e0111e73f43f735d70786557dc38141185072d7ff8dc1829d6a77ac1471468"}, - {file = "grpcio-1.76.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:83d57312a58dcfe2a3a0f9d1389b299438909a02db60e2f2ea2ae2d8034909d3"}, - {file = "grpcio-1.76.0-cp313-cp313-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:3e2a27c89eb9ac3d81ec8835e12414d73536c6e620355d65102503064a4ed6eb"}, - {file = 
"grpcio-1.76.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61f69297cba3950a524f61c7c8ee12e55c486cb5f7db47ff9dcee33da6f0d3ae"}, - {file = "grpcio-1.76.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6a15c17af8839b6801d554263c546c69c4d7718ad4321e3166175b37eaacca77"}, - {file = "grpcio-1.76.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:25a18e9810fbc7e7f03ec2516addc116a957f8cbb8cbc95ccc80faa072743d03"}, - {file = "grpcio-1.76.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:931091142fd8cc14edccc0845a79248bc155425eee9a98b2db2ea4f00a235a42"}, - {file = "grpcio-1.76.0-cp313-cp313-win32.whl", hash = "sha256:5e8571632780e08526f118f74170ad8d50fb0a48c23a746bef2a6ebade3abd6f"}, - {file = "grpcio-1.76.0-cp313-cp313-win_amd64.whl", hash = "sha256:f9f7bd5faab55f47231ad8dba7787866b69f5e93bc306e3915606779bbfb4ba8"}, - {file = "grpcio-1.76.0-cp314-cp314-linux_armv7l.whl", hash = "sha256:ff8a59ea85a1f2191a0ffcc61298c571bc566332f82e5f5be1b83c9d8e668a62"}, - {file = "grpcio-1.76.0-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:06c3d6b076e7b593905d04fdba6a0525711b3466f43b3400266f04ff735de0cd"}, - {file = "grpcio-1.76.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fd5ef5932f6475c436c4a55e4336ebbe47bd3272be04964a03d316bbf4afbcbc"}, - {file = "grpcio-1.76.0-cp314-cp314-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:b331680e46239e090f5b3cead313cc772f6caa7d0fc8de349337563125361a4a"}, - {file = "grpcio-1.76.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2229ae655ec4e8999599469559e97630185fdd53ae1e8997d147b7c9b2b72cba"}, - {file = "grpcio-1.76.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:490fa6d203992c47c7b9e4a9d39003a0c2bcc1c9aa3c058730884bbbb0ee9f09"}, - {file = "grpcio-1.76.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:479496325ce554792dba6548fae3df31a72cef7bad71ca2e12b0e58f9b336bfc"}, - {file = 
"grpcio-1.76.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:1c9b93f79f48b03ada57ea24725d83a30284a012ec27eab2cf7e50a550cbbbcc"}, - {file = "grpcio-1.76.0-cp314-cp314-win32.whl", hash = "sha256:747fa73efa9b8b1488a95d0ba1039c8e2dca0f741612d80415b1e1c560febf4e"}, - {file = "grpcio-1.76.0-cp314-cp314-win_amd64.whl", hash = "sha256:922fa70ba549fce362d2e2871ab542082d66e2aaf0c19480ea453905b01f384e"}, - {file = "grpcio-1.76.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:8ebe63ee5f8fa4296b1b8cfc743f870d10e902ca18afc65c68cf46fd39bb0783"}, - {file = "grpcio-1.76.0-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:3bf0f392c0b806905ed174dcd8bdd5e418a40d5567a05615a030a5aeddea692d"}, - {file = "grpcio-1.76.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0b7604868b38c1bfd5cf72d768aedd7db41d78cb6a4a18585e33fb0f9f2363fd"}, - {file = "grpcio-1.76.0-cp39-cp39-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:e6d1db20594d9daba22f90da738b1a0441a7427552cc6e2e3d1297aeddc00378"}, - {file = "grpcio-1.76.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d099566accf23d21037f18a2a63d323075bebace807742e4b0ac210971d4dd70"}, - {file = "grpcio-1.76.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ebea5cc3aa8ea72e04df9913492f9a96d9348db876f9dda3ad729cfedf7ac416"}, - {file = "grpcio-1.76.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:0c37db8606c258e2ee0c56b78c62fc9dee0e901b5dbdcf816c2dd4ad652b8b0c"}, - {file = "grpcio-1.76.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ebebf83299b0cb1721a8859ea98f3a77811e35dce7609c5c963b9ad90728f886"}, - {file = "grpcio-1.76.0-cp39-cp39-win32.whl", hash = "sha256:0aaa82d0813fd4c8e589fac9b65d7dd88702555f702fb10417f96e2a2a6d4c0f"}, - {file = "grpcio-1.76.0-cp39-cp39-win_amd64.whl", hash = "sha256:acab0277c40eff7143c2323190ea57b9ee5fd353d8190ee9652369fae735668a"}, - {file = "grpcio-1.76.0.tar.gz", hash = 
"sha256:7be78388d6da1a25c0d5ec506523db58b18be22d9c37d8d3a32c08be4987bd73"}, -] - -[package.dependencies] -typing-extensions = ">=4.12,<5.0" - [package.extras] -protobuf = ["grpcio-tools (>=1.76.0)"] +pypi = ["pip (>=24.0)", "platformdirs (>=4.2)", "wheel (>=0.42)"] [[package]] name = "h11" @@ -996,7 +1217,7 @@ version = "3.11" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.8" -groups = ["main", "docs"] +groups = ["main", "docs", "vectorbt"] files = [ {file = "idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea"}, {file = "idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902"}, @@ -1005,6 +1226,109 @@ files = [ [package.extras] all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] +[[package]] +name = "imageio" +version = "2.37.2" +description = "Read and write images and video across all major formats. Supports scientific and volumetric data." 
+optional = false +python-versions = ">=3.9" +groups = ["vectorbt"] +files = [ + {file = "imageio-2.37.2-py3-none-any.whl", hash = "sha256:ad9adfb20335d718c03de457358ed69f141021a333c40a53e57273d8a5bd0b9b"}, + {file = "imageio-2.37.2.tar.gz", hash = "sha256:0212ef2727ac9caa5ca4b2c75ae89454312f440a756fcfc8ef1993e718f50f8a"}, +] + +[package.dependencies] +numpy = "*" +pillow = ">=8.3.2" + +[package.extras] +all-plugins = ["astropy", "av", "fsspec[http]", "imageio-ffmpeg", "numpy (>2)", "pillow-heif", "psutil", "rawpy", "tifffile"] +all-plugins-pypy = ["fsspec[http]", "imageio-ffmpeg", "pillow-heif", "psutil", "tifffile"] +dev = ["black", "flake8", "fsspec[github]", "pytest", "pytest-cov"] +docs = ["numpydoc", "pydata-sphinx-theme", "sphinx (<6)"] +ffmpeg = ["imageio-ffmpeg", "psutil"] +fits = ["astropy"] +freeimage = ["fsspec[http]"] +full = ["astropy", "av", "black", "flake8", "fsspec[github,http]", "imageio-ffmpeg", "numpy (>2)", "numpydoc", "pillow-heif", "psutil", "pydata-sphinx-theme", "pytest", "pytest-cov", "rawpy", "sphinx (<6)", "tifffile"] +gdal = ["gdal"] +itk = ["itk"] +linting = ["black", "flake8"] +pillow-heif = ["pillow-heif"] +pyav = ["av"] +rawpy = ["numpy (>2)", "rawpy"] +test = ["fsspec[github]", "pytest", "pytest-cov"] +tifffile = ["tifffile"] + +[[package]] +name = "ipython" +version = "9.9.0" +description = "IPython: Productive Interactive Computing" +optional = false +python-versions = ">=3.11" +groups = ["vectorbt"] +files = [ + {file = "ipython-9.9.0-py3-none-any.whl", hash = "sha256:b457fe9165df2b84e8ec909a97abcf2ed88f565970efba16b1f7229c283d252b"}, + {file = "ipython-9.9.0.tar.gz", hash = "sha256:48fbed1b2de5e2c7177eefa144aba7fcb82dac514f09b57e2ac9da34ddb54220"}, +] + +[package.dependencies] +colorama = {version = ">=0.4.4", markers = "sys_platform == \"win32\""} +decorator = ">=4.3.2" +ipython-pygments-lexers = ">=1.0.0" +jedi = ">=0.18.1" +matplotlib-inline = ">=0.1.5" +pexpect = {version = ">4.3", markers = "sys_platform != \"win32\" and 
sys_platform != \"emscripten\""} +prompt_toolkit = ">=3.0.41,<3.1.0" +pygments = ">=2.11.0" +stack_data = ">=0.6.0" +traitlets = ">=5.13.0" + +[package.extras] +all = ["argcomplete (>=3.0)", "ipython[doc,matplotlib,terminal,test,test-extra]"] +black = ["black"] +doc = ["docrepr", "exceptiongroup", "intersphinx_registry", "ipykernel", "ipython[matplotlib,test]", "setuptools (>=70.0)", "sphinx (>=8.0)", "sphinx-rtd-theme (>=0.1.8)", "sphinx_toml (==0.0.4)", "typing_extensions"] +matplotlib = ["matplotlib (>3.9)"] +test = ["packaging (>=20.1.0)", "pytest (>=7.0.0)", "pytest-asyncio (>=1.0.0)", "setuptools (>=61.2)", "testpath (>=0.2)"] +test-extra = ["curio", "ipykernel (>6.30)", "ipython[matplotlib]", "ipython[test]", "jupyter_ai", "nbclient", "nbformat", "numpy (>=1.27)", "pandas (>2.1)", "trio (>=0.1.0)"] + +[[package]] +name = "ipython-pygments-lexers" +version = "1.1.1" +description = "Defines a variety of Pygments lexers for highlighting IPython code." +optional = false +python-versions = ">=3.8" +groups = ["vectorbt"] +files = [ + {file = "ipython_pygments_lexers-1.1.1-py3-none-any.whl", hash = "sha256:a9462224a505ade19a605f71f8fa63c2048833ce50abc86768a0d81d876dc81c"}, + {file = "ipython_pygments_lexers-1.1.1.tar.gz", hash = "sha256:09c0138009e56b6854f9535736f4171d855c8c08a563a0dcd8022f78355c7e81"}, +] + +[package.dependencies] +pygments = "*" + +[[package]] +name = "ipywidgets" +version = "8.1.8" +description = "Jupyter interactive widgets" +optional = false +python-versions = ">=3.7" +groups = ["vectorbt"] +files = [ + {file = "ipywidgets-8.1.8-py3-none-any.whl", hash = "sha256:ecaca67aed704a338f88f67b1181b58f821ab5dc89c1f0f5ef99db43c1c2921e"}, + {file = "ipywidgets-8.1.8.tar.gz", hash = "sha256:61f969306b95f85fba6b6986b7fe45d73124d1d9e3023a8068710d47a22ea668"}, +] + +[package.dependencies] +comm = ">=0.1.3" +ipython = ">=6.1.0" +jupyterlab_widgets = ">=3.0.15,<3.1.0" +traitlets = ">=4.3.1" +widgetsnbextension = ">=4.0.14,<4.1.0" + +[package.extras] +test = 
["ipykernel", "jsonschema", "pytest (>=3.6.0)", "pytest-cov", "pytz"] + [[package]] name = "iso3166" version = "2.1.1" @@ -1019,16 +1343,36 @@ files = [ [[package]] name = "iso4217" -version = "1.14.20250512" +version = "1.15.20260101" description = "ISO 4217 currency data package for Python" optional = false python-versions = ">=3.6" groups = ["main"] files = [ - {file = "iso4217-1.14.20250512-py2.py3-none-any.whl", hash = "sha256:f23974b5b6eb264709f6d764703d5c03f168807305077cb76d7cdfcca87d7d76"}, - {file = "iso4217-1.14.20250512.tar.gz", hash = "sha256:f036bd5460f1d371f2df253e90be5032930c8f76512f3a02ecfa5ad1e9fb164b"}, + {file = "iso4217-1.15.20260101-py2.py3-none-any.whl", hash = "sha256:acfe988f6bd3599cf89ebbf06247bcb5d6483e69ab11a0210904d5369774a036"}, + {file = "iso4217-1.15.20260101.tar.gz", hash = "sha256:fde8d0ff97cd5fc4879daf785fe6efcba89bad9b3cb95722b30598324b9786b6"}, +] + +[[package]] +name = "jedi" +version = "0.19.2" +description = "An autocompletion tool for Python that can be used for text editors." 
+optional = false +python-versions = ">=3.6" +groups = ["vectorbt"] +files = [ + {file = "jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9"}, + {file = "jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0"}, ] +[package.dependencies] +parso = ">=0.8.4,<0.9.0" + +[package.extras] +docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] +qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] +testing = ["Django", "attrs", "colorama", "docopt", "pytest (<9.0.0)"] + [[package]] name = "jinja2" version = "3.1.6" @@ -1049,14 +1393,137 @@ i18n = ["Babel (>=2.7)"] [[package]] name = "joblib" -version = "1.5.2" +version = "1.5.3" description = "Lightweight pipelining with Python functions" optional = false python-versions = ">=3.9" -groups = ["main"] +groups = ["main", "vectorbt"] +files = [ + {file = "joblib-1.5.3-py3-none-any.whl", hash = "sha256:5fc3c5039fc5ca8c0276333a188bbd59d6b7ab37fe6632daa76bc7f9ec18e713"}, + {file = "joblib-1.5.3.tar.gz", hash = "sha256:8561a3269e6801106863fd0d6d84bb737be9e7631e33aaed3fb9ce5953688da3"}, +] + +[[package]] +name = "jupyterlab-widgets" +version = "3.0.16" +description = "Jupyter interactive widgets for JupyterLab" +optional = false +python-versions = ">=3.7" +groups = ["vectorbt"] +files = [ + {file = "jupyterlab_widgets-3.0.16-py3-none-any.whl", hash = 
"sha256:45fa36d9c6422cf2559198e4db481aa243c7a32d9926b500781c830c80f7ecf8"}, + {file = "jupyterlab_widgets-3.0.16.tar.gz", hash = "sha256:423da05071d55cf27a9e602216d35a3a65a3e41cdf9c5d3b643b814ce38c19e0"}, +] + +[[package]] +name = "kiwisolver" +version = "1.4.9" +description = "A fast implementation of the Cassowary constraint solver" +optional = false +python-versions = ">=3.10" +groups = ["vectorbt"] files = [ - {file = "joblib-1.5.2-py3-none-any.whl", hash = "sha256:4e1f0bdbb987e6d843c70cf43714cb276623def372df3c22fe5266b2670bc241"}, - {file = "joblib-1.5.2.tar.gz", hash = "sha256:3faa5c39054b2f03ca547da9b2f52fde67c06240c31853f306aea97f13647b55"}, + {file = "kiwisolver-1.4.9-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b4b4d74bda2b8ebf4da5bd42af11d02d04428b2c32846e4c2c93219df8a7987b"}, + {file = "kiwisolver-1.4.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fb3b8132019ea572f4611d770991000d7f58127560c4889729248eb5852a102f"}, + {file = "kiwisolver-1.4.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:84fd60810829c27ae375114cd379da1fa65e6918e1da405f356a775d49a62bcf"}, + {file = "kiwisolver-1.4.9-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b78efa4c6e804ecdf727e580dbb9cba85624d2e1c6b5cb059c66290063bd99a9"}, + {file = "kiwisolver-1.4.9-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d4efec7bcf21671db6a3294ff301d2fc861c31faa3c8740d1a94689234d1b415"}, + {file = "kiwisolver-1.4.9-cp310-cp310-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:90f47e70293fc3688b71271100a1a5453aa9944a81d27ff779c108372cf5567b"}, + {file = "kiwisolver-1.4.9-cp310-cp310-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8fdca1def57a2e88ef339de1737a1449d6dbf5fab184c54a1fca01d541317154"}, + {file = "kiwisolver-1.4.9-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9cf554f21be770f5111a1690d42313e140355e687e05cf82cb23d0a721a64a48"}, + {file = 
"kiwisolver-1.4.9-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fc1795ac5cd0510207482c3d1d3ed781143383b8cfd36f5c645f3897ce066220"}, + {file = "kiwisolver-1.4.9-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:ccd09f20ccdbbd341b21a67ab50a119b64a403b09288c27481575105283c1586"}, + {file = "kiwisolver-1.4.9-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:540c7c72324d864406a009d72f5d6856f49693db95d1fbb46cf86febef873634"}, + {file = "kiwisolver-1.4.9-cp310-cp310-win_amd64.whl", hash = "sha256:ede8c6d533bc6601a47ad4046080d36b8fc99f81e6f1c17b0ac3c2dc91ac7611"}, + {file = "kiwisolver-1.4.9-cp310-cp310-win_arm64.whl", hash = "sha256:7b4da0d01ac866a57dd61ac258c5607b4cd677f63abaec7b148354d2b2cdd536"}, + {file = "kiwisolver-1.4.9-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:eb14a5da6dc7642b0f3a18f13654847cd8b7a2550e2645a5bda677862b03ba16"}, + {file = "kiwisolver-1.4.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:39a219e1c81ae3b103643d2aedb90f1ef22650deb266ff12a19e7773f3e5f089"}, + {file = "kiwisolver-1.4.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2405a7d98604b87f3fc28b1716783534b1b4b8510d8142adca34ee0bc3c87543"}, + {file = "kiwisolver-1.4.9-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dc1ae486f9abcef254b5618dfb4113dd49f94c68e3e027d03cf0143f3f772b61"}, + {file = "kiwisolver-1.4.9-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8a1f570ce4d62d718dce3f179ee78dac3b545ac16c0c04bb363b7607a949c0d1"}, + {file = "kiwisolver-1.4.9-cp311-cp311-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:cb27e7b78d716c591e88e0a09a2139c6577865d7f2e152488c2cc6257f460872"}, + {file = "kiwisolver-1.4.9-cp311-cp311-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:15163165efc2f627eb9687ea5f3a28137217d217ac4024893d753f46bce9de26"}, + {file = "kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:bdee92c56a71d2b24c33a7d4c2856bd6419d017e08caa7802d2963870e315028"}, + {file = "kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:412f287c55a6f54b0650bd9b6dce5aceddb95864a1a90c87af16979d37c89771"}, + {file = "kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2c93f00dcba2eea70af2be5f11a830a742fe6b579a1d4e00f47760ef13be247a"}, + {file = "kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f117e1a089d9411663a3207ba874f31be9ac8eaa5b533787024dc07aeb74f464"}, + {file = "kiwisolver-1.4.9-cp311-cp311-win_amd64.whl", hash = "sha256:be6a04e6c79819c9a8c2373317d19a96048e5a3f90bec587787e86a1153883c2"}, + {file = "kiwisolver-1.4.9-cp311-cp311-win_arm64.whl", hash = "sha256:0ae37737256ba2de764ddc12aed4956460277f00c4996d51a197e72f62f5eec7"}, + {file = "kiwisolver-1.4.9-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ac5a486ac389dddcc5bef4f365b6ae3ffff2c433324fb38dd35e3fab7c957999"}, + {file = "kiwisolver-1.4.9-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f2ba92255faa7309d06fe44c3a4a97efe1c8d640c2a79a5ef728b685762a6fd2"}, + {file = "kiwisolver-1.4.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a2899935e724dd1074cb568ce7ac0dce28b2cd6ab539c8e001a8578eb106d14"}, + {file = "kiwisolver-1.4.9-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f6008a4919fdbc0b0097089f67a1eb55d950ed7e90ce2cc3e640abadd2757a04"}, + {file = "kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:67bb8b474b4181770f926f7b7d2f8c0248cbcb78b660fdd41a47054b28d2a752"}, + {file = "kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2327a4a30d3ee07d2fbe2e7933e8a37c591663b96ce42a00bc67461a87d7df77"}, + {file = "kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:7a08b491ec91b1d5053ac177afe5290adacf1f0f6307d771ccac5de30592d198"}, + {file = 
"kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8fc5c867c22b828001b6a38d2eaeb88160bf5783c6cb4a5e440efc981ce286d"}, + {file = "kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:3b3115b2581ea35bb6d1f24a4c90af37e5d9b49dcff267eeed14c3893c5b86ab"}, + {file = "kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:858e4c22fb075920b96a291928cb7dea5644e94c0ee4fcd5af7e865655e4ccf2"}, + {file = "kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ed0fecd28cc62c54b262e3736f8bb2512d8dcfdc2bcf08be5f47f96bf405b145"}, + {file = "kiwisolver-1.4.9-cp312-cp312-win_amd64.whl", hash = "sha256:f68208a520c3d86ea51acf688a3e3002615a7f0238002cccc17affecc86a8a54"}, + {file = "kiwisolver-1.4.9-cp312-cp312-win_arm64.whl", hash = "sha256:2c1a4f57df73965f3f14df20b80ee29e6a7930a57d2d9e8491a25f676e197c60"}, + {file = "kiwisolver-1.4.9-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a5d0432ccf1c7ab14f9949eec60c5d1f924f17c037e9f8b33352fa05799359b8"}, + {file = "kiwisolver-1.4.9-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efb3a45b35622bb6c16dbfab491a8f5a391fe0e9d45ef32f4df85658232ca0e2"}, + {file = "kiwisolver-1.4.9-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1a12cf6398e8a0a001a059747a1cbf24705e18fe413bc22de7b3d15c67cffe3f"}, + {file = "kiwisolver-1.4.9-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b67e6efbf68e077dd71d1a6b37e43e1a99d0bff1a3d51867d45ee8908b931098"}, + {file = "kiwisolver-1.4.9-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5656aa670507437af0207645273ccdfee4f14bacd7f7c67a4306d0dcaeaf6eed"}, + {file = "kiwisolver-1.4.9-cp313-cp313-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:bfc08add558155345129c7803b3671cf195e6a56e7a12f3dde7c57d9b417f525"}, + {file = "kiwisolver-1.4.9-cp313-cp313-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:40092754720b174e6ccf9e845d0d8c7d8e12c3d71e7fc35f55f3813e96376f78"}, + {file = "kiwisolver-1.4.9-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:497d05f29a1300d14e02e6441cf0f5ee81c1ff5a304b0d9fb77423974684e08b"}, + {file = "kiwisolver-1.4.9-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:bdd1a81a1860476eb41ac4bc1e07b3f07259e6d55bbf739b79c8aaedcf512799"}, + {file = "kiwisolver-1.4.9-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:e6b93f13371d341afee3be9f7c5964e3fe61d5fa30f6a30eb49856935dfe4fc3"}, + {file = "kiwisolver-1.4.9-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d75aa530ccfaa593da12834b86a0724f58bff12706659baa9227c2ccaa06264c"}, + {file = "kiwisolver-1.4.9-cp313-cp313-win_amd64.whl", hash = "sha256:dd0a578400839256df88c16abddf9ba14813ec5f21362e1fe65022e00c883d4d"}, + {file = "kiwisolver-1.4.9-cp313-cp313-win_arm64.whl", hash = "sha256:d4188e73af84ca82468f09cadc5ac4db578109e52acb4518d8154698d3a87ca2"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:5a0f2724dfd4e3b3ac5a82436a8e6fd16baa7d507117e4279b660fe8ca38a3a1"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:1b11d6a633e4ed84fc0ddafd4ebfd8ea49b3f25082c04ad12b8315c11d504dc1"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61874cdb0a36016354853593cffc38e56fc9ca5aa97d2c05d3dcf6922cd55a11"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:60c439763a969a6af93b4881db0eed8fadf93ee98e18cbc35bc8da868d0c4f0c"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:92a2f997387a1b79a75e7803aa7ded2cfbe2823852ccf1ba3bcf613b62ae3197"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a31d512c812daea6d8b3be3b2bfcbeb091dbb09177706569bcfc6240dcf8b41c"}, + {file = 
"kiwisolver-1.4.9-cp313-cp313t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:52a15b0f35dad39862d376df10c5230155243a2c1a436e39eb55623ccbd68185"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a30fd6fdef1430fd9e1ba7b3398b5ee4e2887783917a687d86ba69985fb08748"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:cc9617b46837c6468197b5945e196ee9ca43057bb7d9d1ae688101e4e1dddf64"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:0ab74e19f6a2b027ea4f845a78827969af45ce790e6cb3e1ebab71bdf9f215ff"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:dba5ee5d3981160c28d5490f0d1b7ed730c22470ff7f6cc26cfcfaacb9896a07"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-win_arm64.whl", hash = "sha256:0749fd8f4218ad2e851e11cc4dc05c7cbc0cbc4267bdfdb31782e65aace4ee9c"}, + {file = "kiwisolver-1.4.9-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:9928fe1eb816d11ae170885a74d074f57af3a0d65777ca47e9aeb854a1fba386"}, + {file = "kiwisolver-1.4.9-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d0005b053977e7b43388ddec89fa567f43d4f6d5c2c0affe57de5ebf290dc552"}, + {file = "kiwisolver-1.4.9-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:2635d352d67458b66fd0667c14cb1d4145e9560d503219034a18a87e971ce4f3"}, + {file = "kiwisolver-1.4.9-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:767c23ad1c58c9e827b649a9ab7809fd5fd9db266a9cf02b0e926ddc2c680d58"}, + {file = "kiwisolver-1.4.9-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:72d0eb9fba308b8311685c2268cf7d0a0639a6cd027d8128659f72bdd8a024b4"}, + {file = "kiwisolver-1.4.9-cp314-cp314-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f68e4f3eeca8fb22cc3d731f9715a13b652795ef657a13df1ad0c7dc0e9731df"}, + {file = "kiwisolver-1.4.9-cp314-cp314-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:d84cd4061ae292d8ac367b2c3fa3aad11cb8625a95d135fe93f286f914f3f5a6"}, + {file = "kiwisolver-1.4.9-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:a60ea74330b91bd22a29638940d115df9dc00af5035a9a2a6ad9399ffb4ceca5"}, + {file = "kiwisolver-1.4.9-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:ce6a3a4e106cf35c2d9c4fa17c05ce0b180db622736845d4315519397a77beaf"}, + {file = "kiwisolver-1.4.9-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:77937e5e2a38a7b48eef0585114fe7930346993a88060d0bf886086d2aa49ef5"}, + {file = "kiwisolver-1.4.9-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:24c175051354f4a28c5d6a31c93906dc653e2bf234e8a4bbfb964892078898ce"}, + {file = "kiwisolver-1.4.9-cp314-cp314-win_amd64.whl", hash = "sha256:0763515d4df10edf6d06a3c19734e2566368980d21ebec439f33f9eb936c07b7"}, + {file = "kiwisolver-1.4.9-cp314-cp314-win_arm64.whl", hash = "sha256:0e4e2bf29574a6a7b7f6cb5fa69293b9f96c928949ac4a53ba3f525dffb87f9c"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:d976bbb382b202f71c67f77b0ac11244021cfa3f7dfd9e562eefcea2df711548"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2489e4e5d7ef9a1c300a5e0196e43d9c739f066ef23270607d45aba368b91f2d"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:e2ea9f7ab7fbf18fffb1b5434ce7c69a07582f7acc7717720f1d69f3e806f90c"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b34e51affded8faee0dfdb705416153819d8ea9250bbbf7ea1b249bdeb5f1122"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d8aacd3d4b33b772542b2e01beb50187536967b514b00003bdda7589722d2a64"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7cf974dd4e35fa315563ac99d6287a1024e4dc2077b8a7d7cd3d2fb65d283134"}, + {file = 
"kiwisolver-1.4.9-cp314-cp314t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:85bd218b5ecfbee8c8a82e121802dcb519a86044c9c3b2e4aef02fa05c6da370"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:0856e241c2d3df4efef7c04a1e46b1936b6120c9bcf36dd216e3acd84bc4fb21"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9af39d6551f97d31a4deebeac6f45b156f9755ddc59c07b402c148f5dbb6482a"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:bb4ae2b57fc1d8cbd1cf7b1d9913803681ffa903e7488012be5b76dedf49297f"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:aedff62918805fb62d43a4aa2ecd4482c380dc76cd31bd7c8878588a61bd0369"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-win_amd64.whl", hash = "sha256:1fa333e8b2ce4d9660f2cda9c0e1b6bafcfb2457a9d259faa82289e73ec24891"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-win_arm64.whl", hash = "sha256:4a48a2ce79d65d363597ef7b567ce3d14d68783d2b2263d98db3d9477805ba32"}, + {file = "kiwisolver-1.4.9-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4d1d9e582ad4d63062d34077a9a1e9f3c34088a2ec5135b1f7190c07cf366527"}, + {file = "kiwisolver-1.4.9-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:deed0c7258ceb4c44ad5ec7d9918f9f14fd05b2be86378d86cf50e63d1e7b771"}, + {file = "kiwisolver-1.4.9-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0a590506f303f512dff6b7f75fd2fd18e16943efee932008fe7140e5fa91d80e"}, + {file = "kiwisolver-1.4.9-pp310-pypy310_pp73-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e09c2279a4d01f099f52d5c4b3d9e208e91edcbd1a175c9662a8b16e000fece9"}, + {file = "kiwisolver-1.4.9-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c9e7cdf45d594ee04d5be1b24dd9d49f3d1590959b2271fb30b5ca2b262c00fb"}, + {file = "kiwisolver-1.4.9-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = 
"sha256:720e05574713db64c356e86732c0f3c5252818d05f9df320f0ad8380641acea5"}, + {file = "kiwisolver-1.4.9-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:17680d737d5335b552994a2008fab4c851bcd7de33094a82067ef3a576ff02fa"}, + {file = "kiwisolver-1.4.9-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:85b5352f94e490c028926ea567fc569c52ec79ce131dadb968d3853e809518c2"}, + {file = "kiwisolver-1.4.9-pp311-pypy311_pp73-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:464415881e4801295659462c49461a24fb107c140de781d55518c4b80cb6790f"}, + {file = "kiwisolver-1.4.9-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:fb940820c63a9590d31d88b815e7a3aa5915cad3ce735ab45f0c730b39547de1"}, + {file = "kiwisolver-1.4.9.tar.gz", hash = "sha256:c3b22c26c6fd6811b0ae8363b95ca8ce4ea3c202d3d0975b2914310ceb1bcc4d"}, ] [[package]] @@ -1108,6 +1575,37 @@ files = [ [package.dependencies] requests = "*" +[[package]] +name = "llvmlite" +version = "0.46.0" +description = "lightweight wrapper around basic LLVM functionality" +optional = false +python-versions = ">=3.10" +groups = ["vectorbt"] +files = [ + {file = "llvmlite-0.46.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4323177e936d61ae0f73e653e2e614284d97d14d5dd12579adc92b6c2b0597b0"}, + {file = "llvmlite-0.46.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0a2d461cb89537b7c20feb04c46c32e12d5ad4f0896c9dfc0f60336219ff248e"}, + {file = "llvmlite-0.46.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b1f6595a35b7b39c3518b85a28bf18f45e075264e4b2dce3f0c2a4f232b4a910"}, + {file = "llvmlite-0.46.0-cp310-cp310-win_amd64.whl", hash = "sha256:e7a34d4aa6f9a97ee006b504be6d2b8cb7f755b80ab2f344dda1ef992f828559"}, + {file = "llvmlite-0.46.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:82f3d39b16f19aa1a56d5fe625883a6ab600d5cc9ea8906cca70ce94cabba067"}, + {file = 
"llvmlite-0.46.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a3df43900119803bbc52720e758c76f316a9a0f34612a886862dfe0a5591a17e"}, + {file = "llvmlite-0.46.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:de183fefc8022d21b0aa37fc3e90410bc3524aed8617f0ff76732fc6c3af5361"}, + {file = "llvmlite-0.46.0-cp311-cp311-win_amd64.whl", hash = "sha256:e8b10bc585c58bdffec9e0c309bb7d51be1f2f15e169a4b4d42f2389e431eb93"}, + {file = "llvmlite-0.46.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6b9588ad4c63b4f0175a3984b85494f0c927c6b001e3a246a3a7fb3920d9a137"}, + {file = "llvmlite-0.46.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3535bd2bb6a2d7ae4012681ac228e5132cdb75fefb1bcb24e33f2f3e0c865ed4"}, + {file = "llvmlite-0.46.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4cbfd366e60ff87ea6cc62f50bc4cd800ebb13ed4c149466f50cf2163a473d1e"}, + {file = "llvmlite-0.46.0-cp312-cp312-win_amd64.whl", hash = "sha256:398b39db462c39563a97b912d4f2866cd37cba60537975a09679b28fbbc0fb38"}, + {file = "llvmlite-0.46.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:30b60892d034bc560e0ec6654737aaa74e5ca327bd8114d82136aa071d611172"}, + {file = "llvmlite-0.46.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6cc19b051753368a9c9f31dc041299059ee91aceec81bd57b0e385e5d5bf1a54"}, + {file = "llvmlite-0.46.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bca185892908f9ede48c0acd547fe4dc1bafefb8a4967d47db6cf664f9332d12"}, + {file = "llvmlite-0.46.0-cp313-cp313-win_amd64.whl", hash = "sha256:67438fd30e12349ebb054d86a5a1a57fd5e87d264d2451bcfafbbbaa25b82a35"}, + {file = "llvmlite-0.46.0-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:d252edfb9f4ac1fcf20652258e3f102b26b03eef738dc8a6ffdab7d7d341d547"}, + {file = "llvmlite-0.46.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:379fdd1c59badeff8982cb47e4694a6143bec3bb49aa10a466e095410522064d"}, + {file = "llvmlite-0.46.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2e8cbfff7f6db0fa2c771ad24154e2a7e457c2444d7673e6de06b8b698c3b269"}, + {file = "llvmlite-0.46.0-cp314-cp314-win_amd64.whl", hash = "sha256:7821eda3ec1f18050f981819756631d60b6d7ab1a6cf806d9efefbe3f4082d61"}, + {file = "llvmlite-0.46.0.tar.gz", hash = "sha256:227c9fd6d09dce2783c18b754b7cd9d9b3b3515210c46acc2d3c5badd9870ceb"}, +] + [[package]] name = "mako" version = "1.3.10" @@ -1130,14 +1628,14 @@ testing = ["pytest"] [[package]] name = "markdown" -version = "3.9" +version = "3.10" description = "Python implementation of John Gruber's Markdown." optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["docs"] files = [ - {file = "markdown-3.9-py3-none-any.whl", hash = "sha256:9f4d91ed810864ea88a6f32c07ba8bee1346c0cc1f6b1f9f6c822f2a9667d280"}, - {file = "markdown-3.9.tar.gz", hash = "sha256:d2900fe1782bd33bdbbd56859defef70c2e78fc46668f8eb9df3128138f2cb6a"}, + {file = "markdown-3.10-py3-none-any.whl", hash = "sha256:b5b99d6951e2e4948d939255596523444c0e677c669700b1d17aa4a8a464cb7c"}, + {file = "markdown-3.10.tar.gz", hash = "sha256:37062d4f2aa4b2b6b32aefb80faa300f82cc790cb949a35b8caede34f2b68c0e"}, ] [package.extras] @@ -1258,6 +1756,103 @@ files = [ {file = "markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698"}, ] +[[package]] +name = "matplotlib" +version = "3.10.8" +description = "Python plotting package" +optional = false +python-versions = ">=3.10" +groups = ["vectorbt"] +files = [ + {file = "matplotlib-3.10.8-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:00270d217d6b20d14b584c521f810d60c5c78406dc289859776550df837dcda7"}, + {file = "matplotlib-3.10.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37b3c1cc42aa184b3f738cfa18c1c1d72fd496d85467a6cf7b807936d39aa656"}, + {file = 
"matplotlib-3.10.8-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ee40c27c795bda6a5292e9cff9890189d32f7e3a0bf04e0e3c9430c4a00c37df"}, + {file = "matplotlib-3.10.8-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a48f2b74020919552ea25d222d5cc6af9ca3f4eb43a93e14d068457f545c2a17"}, + {file = "matplotlib-3.10.8-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f254d118d14a7f99d616271d6c3c27922c092dac11112670b157798b89bf4933"}, + {file = "matplotlib-3.10.8-cp310-cp310-win_amd64.whl", hash = "sha256:f9b587c9c7274c1613a30afabf65a272114cd6cdbe67b3406f818c79d7ab2e2a"}, + {file = "matplotlib-3.10.8-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6be43b667360fef5c754dda5d25a32e6307a03c204f3c0fc5468b78fa87b4160"}, + {file = "matplotlib-3.10.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a2b336e2d91a3d7006864e0990c83b216fcdca64b5a6484912902cef87313d78"}, + {file = "matplotlib-3.10.8-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:efb30e3baaea72ce5928e32bab719ab4770099079d66726a62b11b1ef7273be4"}, + {file = "matplotlib-3.10.8-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d56a1efd5bfd61486c8bc968fa18734464556f0fb8e51690f4ac25d85cbbbbc2"}, + {file = "matplotlib-3.10.8-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:238b7ce5717600615c895050239ec955d91f321c209dd110db988500558e70d6"}, + {file = "matplotlib-3.10.8-cp311-cp311-win_amd64.whl", hash = "sha256:18821ace09c763ec93aef5eeff087ee493a24051936d7b9ebcad9662f66501f9"}, + {file = "matplotlib-3.10.8-cp311-cp311-win_arm64.whl", hash = "sha256:bab485bcf8b1c7d2060b4fcb6fc368a9e6f4cd754c9c2fea281f4be21df394a2"}, + {file = "matplotlib-3.10.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:64fcc24778ca0404ce0cb7b6b77ae1f4c7231cdd60e6778f999ee05cbd581b9a"}, + {file = "matplotlib-3.10.8-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:b9a5ca4ac220a0cdd1ba6bcba3608547117d30468fefce49bb26f55c1a3d5c58"}, + {file = "matplotlib-3.10.8-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3ab4aabc72de4ff77b3ec33a6d78a68227bf1123465887f9905ba79184a1cc04"}, + {file = "matplotlib-3.10.8-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:24d50994d8c5816ddc35411e50a86ab05f575e2530c02752e02538122613371f"}, + {file = "matplotlib-3.10.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:99eefd13c0dc3b3c1b4d561c1169e65fe47aab7b8158754d7c084088e2329466"}, + {file = "matplotlib-3.10.8-cp312-cp312-win_amd64.whl", hash = "sha256:dd80ecb295460a5d9d260df63c43f4afbdd832d725a531f008dad1664f458adf"}, + {file = "matplotlib-3.10.8-cp312-cp312-win_arm64.whl", hash = "sha256:3c624e43ed56313651bc18a47f838b60d7b8032ed348911c54906b130b20071b"}, + {file = "matplotlib-3.10.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3f2e409836d7f5ac2f1c013110a4d50b9f7edc26328c108915f9075d7d7a91b6"}, + {file = "matplotlib-3.10.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:56271f3dac49a88d7fca5060f004d9d22b865f743a12a23b1e937a0be4818ee1"}, + {file = "matplotlib-3.10.8-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a0a7f52498f72f13d4a25ea70f35f4cb60642b466cbb0a9be951b5bc3f45a486"}, + {file = "matplotlib-3.10.8-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:646d95230efb9ca614a7a594d4fcacde0ac61d25e37dd51710b36477594963ce"}, + {file = "matplotlib-3.10.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f89c151aab2e2e23cb3fe0acad1e8b82841fd265379c4cecd0f3fcb34c15e0f6"}, + {file = "matplotlib-3.10.8-cp313-cp313-win_amd64.whl", hash = "sha256:e8ea3e2d4066083e264e75c829078f9e149fa119d27e19acd503de65e0b13149"}, + {file = "matplotlib-3.10.8-cp313-cp313-win_arm64.whl", hash = "sha256:c108a1d6fa78a50646029cb6d49808ff0fc1330fda87fa6f6250c6b5369b6645"}, + {file = 
"matplotlib-3.10.8-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:ad3d9833a64cf48cc4300f2b406c3d0f4f4724a91c0bd5640678a6ba7c102077"}, + {file = "matplotlib-3.10.8-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:eb3823f11823deade26ce3b9f40dcb4a213da7a670013929f31d5f5ed1055b22"}, + {file = "matplotlib-3.10.8-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d9050fee89a89ed57b4fb2c1bfac9a3d0c57a0d55aed95949eedbc42070fea39"}, + {file = "matplotlib-3.10.8-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b44d07310e404ba95f8c25aa5536f154c0a8ec473303535949e52eb71d0a1565"}, + {file = "matplotlib-3.10.8-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:0a33deb84c15ede243aead39f77e990469fff93ad1521163305095b77b72ce4a"}, + {file = "matplotlib-3.10.8-cp313-cp313t-win_amd64.whl", hash = "sha256:3a48a78d2786784cc2413e57397981fb45c79e968d99656706018d6e62e57958"}, + {file = "matplotlib-3.10.8-cp313-cp313t-win_arm64.whl", hash = "sha256:15d30132718972c2c074cd14638c7f4592bd98719e2308bccea40e0538bc0cb5"}, + {file = "matplotlib-3.10.8-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b53285e65d4fa4c86399979e956235deb900be5baa7fc1218ea67fbfaeaadd6f"}, + {file = "matplotlib-3.10.8-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:32f8dce744be5569bebe789e46727946041199030db8aeb2954d26013a0eb26b"}, + {file = "matplotlib-3.10.8-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4cf267add95b1c88300d96ca837833d4112756045364f5c734a2276038dae27d"}, + {file = "matplotlib-3.10.8-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2cf5bd12cecf46908f286d7838b2abc6c91cda506c0445b8223a7c19a00df008"}, + {file = "matplotlib-3.10.8-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:41703cc95688f2516b480f7f339d8851a6035f18e100ee6a32bc0b8536a12a9c"}, + {file = "matplotlib-3.10.8-cp314-cp314-win_amd64.whl", hash = 
"sha256:83d282364ea9f3e52363da262ce32a09dfe241e4080dcedda3c0db059d3c1f11"}, + {file = "matplotlib-3.10.8-cp314-cp314-win_arm64.whl", hash = "sha256:2c1998e92cd5999e295a731bcb2911c75f597d937341f3030cc24ef2733d78a8"}, + {file = "matplotlib-3.10.8-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:b5a2b97dbdc7d4f353ebf343744f1d1f1cca8aa8bfddb4262fcf4306c3761d50"}, + {file = "matplotlib-3.10.8-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:3f5c3e4da343bba819f0234186b9004faba952cc420fbc522dc4e103c1985908"}, + {file = "matplotlib-3.10.8-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f62550b9a30afde8c1c3ae450e5eb547d579dd69b25c2fc7a1c67f934c1717a"}, + {file = "matplotlib-3.10.8-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:495672de149445ec1b772ff2c9ede9b769e3cb4f0d0aa7fa730d7f59e2d4e1c1"}, + {file = "matplotlib-3.10.8-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:595ba4d8fe983b88f0eec8c26a241e16d6376fe1979086232f481f8f3f67494c"}, + {file = "matplotlib-3.10.8-cp314-cp314t-win_amd64.whl", hash = "sha256:25d380fe8b1dc32cf8f0b1b448470a77afb195438bafdf1d858bfb876f3edf7b"}, + {file = "matplotlib-3.10.8-cp314-cp314t-win_arm64.whl", hash = "sha256:113bb52413ea508ce954a02c10ffd0d565f9c3bc7f2eddc27dfe1731e71c7b5f"}, + {file = "matplotlib-3.10.8-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:f97aeb209c3d2511443f8797e3e5a569aebb040d4f8bc79aa3ee78a8fb9e3dd8"}, + {file = "matplotlib-3.10.8-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:fb061f596dad3a0f52b60dc6a5dec4a0c300dec41e058a7efe09256188d170b7"}, + {file = "matplotlib-3.10.8-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:12d90df9183093fcd479f4172ac26b322b1248b15729cb57f42f71f24c7e37a3"}, + {file = "matplotlib-3.10.8-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:6da7c2ce169267d0d066adcf63758f0604aa6c3eebf67458930f9d9b79ad1db1"}, + {file = 
"matplotlib-3.10.8-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:9153c3292705be9f9c64498a8872118540c3f4123d1a1c840172edf262c8be4a"}, + {file = "matplotlib-3.10.8-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1ae029229a57cd1e8fe542485f27e7ca7b23aa9e8944ddb4985d0bc444f1eca2"}, + {file = "matplotlib-3.10.8.tar.gz", hash = "sha256:2299372c19d56bcd35cf05a2738308758d32b9eaed2371898d8f5bd33f084aa3"}, +] + +[package.dependencies] +contourpy = ">=1.0.1" +cycler = ">=0.10" +fonttools = ">=4.22.0" +kiwisolver = ">=1.3.1" +numpy = ">=1.23" +packaging = ">=20.0" +pillow = ">=8" +pyparsing = ">=3" +python-dateutil = ">=2.7" + +[package.extras] +dev = ["meson-python (>=0.13.1,<0.17.0)", "pybind11 (>=2.13.2,!=2.13.3)", "setuptools (>=64)", "setuptools_scm (>=7)"] + +[[package]] +name = "matplotlib-inline" +version = "0.2.1" +description = "Inline Matplotlib backend for Jupyter" +optional = false +python-versions = ">=3.9" +groups = ["vectorbt"] +files = [ + {file = "matplotlib_inline-0.2.1-py3-none-any.whl", hash = "sha256:d56ce5156ba6085e00a9d54fead6ed29a9c47e215cd1bba2e976ef39f5710a76"}, + {file = "matplotlib_inline-0.2.1.tar.gz", hash = "sha256:e1ee949c340d771fc39e241ea75683deb94762c8fa5f2927ec57c83c4dffa9fe"}, +] + +[package.dependencies] +traitlets = "*" + +[package.extras] +test = ["flake8", "nbdime", "nbval", "notebook", "pytest"] + [[package]] name = "mdx-include" version = "1.4.2" @@ -1355,28 +1950,28 @@ pyyaml = ">=5.1" [[package]] name = "mkdocs-material" -version = "9.6.22" +version = "9.7.1" description = "Documentation that simply works" optional = false python-versions = ">=3.8" groups = ["docs"] files = [ - {file = "mkdocs_material-9.6.22-py3-none-any.whl", hash = "sha256:14ac5f72d38898b2f98ac75a5531aaca9366eaa427b0f49fc2ecf04d99b7ad84"}, - {file = "mkdocs_material-9.6.22.tar.gz", hash = "sha256:87c158b0642e1ada6da0cbd798a3389b0bc5516b90e5ece4a0fb939f00bacd1c"}, + {file = "mkdocs_material-9.7.1-py3-none-any.whl", 
hash = "sha256:3f6100937d7d731f87f1e3e3b021c97f7239666b9ba1151ab476cabb96c60d5c"}, + {file = "mkdocs_material-9.7.1.tar.gz", hash = "sha256:89601b8f2c3e6c6ee0a918cc3566cb201d40bf37c3cd3c2067e26fadb8cce2b8"}, ] [package.dependencies] -babel = ">=2.10,<3.0" -backrefs = ">=5.7.post1,<6.0" -colorama = ">=0.4,<1.0" -jinja2 = ">=3.1,<4.0" -markdown = ">=3.2,<4.0" -mkdocs = ">=1.6,<2.0" -mkdocs-material-extensions = ">=1.3,<2.0" -paginate = ">=0.5,<1.0" -pygments = ">=2.16,<3.0" -pymdown-extensions = ">=10.2,<11.0" -requests = ">=2.26,<3.0" +babel = ">=2.10" +backrefs = ">=5.7.post1" +colorama = ">=0.4" +jinja2 = ">=3.1" +markdown = ">=3.2" +mkdocs = ">=1.6" +mkdocs-material-extensions = ">=1.3" +paginate = ">=0.5" +pygments = ">=2.16" +pymdown-extensions = ">=10.2" +requests = ">=2.30" [package.extras] git = ["mkdocs-git-committers-plugin-2 (>=1.1,<3)", "mkdocs-git-revision-date-localized-plugin (>=1.2.4,<2.0)"] @@ -1423,14 +2018,14 @@ python-legacy = ["mkdocstrings-python-legacy (>=0.2.1)"] [[package]] name = "mkdocstrings-python" -version = "1.18.2" +version = "2.0.1" description = "A Python handler for mkdocstrings." 
optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["docs"] files = [ - {file = "mkdocstrings_python-1.18.2-py3-none-any.whl", hash = "sha256:944fe6deb8f08f33fa936d538233c4036e9f53e840994f6146e8e94eb71b600d"}, - {file = "mkdocstrings_python-1.18.2.tar.gz", hash = "sha256:4ad536920a07b6336f50d4c6d5603316fafb1172c5c882370cbbc954770ad323"}, + {file = "mkdocstrings_python-2.0.1-py3-none-any.whl", hash = "sha256:66ecff45c5f8b71bf174e11d49afc845c2dfc7fc0ab17a86b6b337e0f24d8d90"}, + {file = "mkdocstrings_python-2.0.1.tar.gz", hash = "sha256:843a562221e6a471fefdd4b45cc6c22d2607ccbad632879234fa9692e9cf7732"}, ] [package.dependencies] @@ -1449,27 +2044,101 @@ files = [ {file = "multitasking-0.0.12.tar.gz", hash = "sha256:2fba2fa8ed8c4b85e227c5dd7dc41c7d658de3b6f247927316175a57349b84d1"}, ] +[[package]] +name = "mypy-extensions" +version = "1.1.0" +description = "Type system extensions for programs checked with the mypy type checker." +optional = false +python-versions = ">=3.8" +groups = ["vectorbt"] +files = [ + {file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"}, + {file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"}, +] + +[[package]] +name = "narwhals" +version = "2.15.0" +description = "Extremely lightweight compatibility layer between dataframe libraries" +optional = false +python-versions = ">=3.9" +groups = ["vectorbt"] +files = [ + {file = "narwhals-2.15.0-py3-none-any.whl", hash = "sha256:cbfe21ca19d260d9fd67f995ec75c44592d1f106933b03ddd375df7ac841f9d6"}, + {file = "narwhals-2.15.0.tar.gz", hash = "sha256:a9585975b99d95084268445a1fdd881311fa26ef1caa18020d959d5b2ff9a965"}, +] + +[package.extras] +cudf = ["cudf (>=24.10.0)"] +dask = ["dask[dataframe] (>=2024.8)"] +duckdb = ["duckdb (>=1.1)"] +ibis = ["ibis-framework (>=6.0.0)", "packaging", "pyarrow-hotfix", "rich"] +modin = ["modin"] 
+pandas = ["pandas (>=1.1.3)"] +polars = ["polars (>=0.20.4)"] +pyarrow = ["pyarrow (>=13.0.0)"] +pyspark = ["pyspark (>=3.5.0)"] +pyspark-connect = ["pyspark[connect] (>=3.5.0)"] +sqlframe = ["sqlframe (>=3.22.0,!=3.39.3)"] + [[package]] name = "networkx" -version = "3.5" +version = "3.6" description = "Python package for creating and manipulating graphs and networks" optional = false python-versions = ">=3.11" groups = ["main"] files = [ - {file = "networkx-3.5-py3-none-any.whl", hash = "sha256:0030d386a9a06dee3565298b4a734b68589749a544acbb6c412dc9e2489ec6ec"}, - {file = "networkx-3.5.tar.gz", hash = "sha256:d4c6f9cf81f52d69230866796b82afbccdec3db7ae4fbd1b65ea750feed50037"}, + {file = "networkx-3.6-py3-none-any.whl", hash = "sha256:cdb395b105806062473d3be36458d8f1459a4e4b98e236a66c3a48996e07684f"}, + {file = "networkx-3.6.tar.gz", hash = "sha256:285276002ad1f7f7da0f7b42f004bcba70d381e936559166363707fdad3d72ad"}, ] [package.extras] +benchmarking = ["asv", "virtualenv"] default = ["matplotlib (>=3.8)", "numpy (>=1.25)", "pandas (>=2.0)", "scipy (>=1.11.2)"] developer = ["mypy (>=1.15)", "pre-commit (>=4.1)"] doc = ["intersphinx-registry", "myst-nb (>=1.1)", "numpydoc (>=1.8.0)", "pillow (>=10)", "pydata-sphinx-theme (>=0.16)", "sphinx (>=8.0)", "sphinx-gallery (>=0.18)", "texext (>=0.6.7)"] -example = ["cairocffi (>=1.7)", "contextily (>=1.6)", "igraph (>=0.11)", "momepy (>=0.7.2)", "osmnx (>=2.0.0)", "scikit-learn (>=1.5)", "seaborn (>=0.13)"] +example = ["cairocffi (>=1.7)", "contextily (>=1.6)", "igraph (>=0.11)", "iplotx (>=0.9.0)", "momepy (>=0.7.2)", "osmnx (>=2.0.0)", "scikit-learn (>=1.5)", "seaborn (>=0.13)"] extra = ["lxml (>=4.6)", "pydot (>=3.0.1)", "pygraphviz (>=1.14)", "sympy (>=1.10)"] +release = ["build (>=0.10)", "changelist (==0.5)", "twine (>=4.0)", "wheel (>=0.40)"] test = ["pytest (>=7.2)", "pytest-cov (>=4.0)", "pytest-xdist (>=3.0)"] test-extras = ["pytest-mpl", "pytest-randomly"] +[[package]] +name = "numba" +version = "0.63.1" +description 
= "compiling Python code using LLVM" +optional = false +python-versions = ">=3.10" +groups = ["vectorbt"] +files = [ + {file = "numba-0.63.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c6d6bf5bf00f7db629305caaec82a2ffb8abe2bf45eaad0d0738dc7de4113779"}, + {file = "numba-0.63.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:08653d0dfc9cc9c4c9a8fba29ceb1f2d5340c3b86c4a7e5e07e42b643bc6a2f4"}, + {file = "numba-0.63.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f09eebf5650246ce2a4e9a8d38270e2d4b0b0ae978103bafb38ed7adc5ea906e"}, + {file = "numba-0.63.1-cp310-cp310-win_amd64.whl", hash = "sha256:f8bba17421d865d8c0f7be2142754ebce53e009daba41c44cf6909207d1a8d7d"}, + {file = "numba-0.63.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b33db00f18ccc790ee9911ce03fcdfe9d5124637d1ecc266f5ae0df06e02fec3"}, + {file = "numba-0.63.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7d31ea186a78a7c0f6b1b2a3fe68057fdb291b045c52d86232b5383b6cf4fc25"}, + {file = "numba-0.63.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ed3bb2fbdb651d6aac394388130a7001aab6f4541837123a4b4ab8b02716530c"}, + {file = "numba-0.63.1-cp311-cp311-win_amd64.whl", hash = "sha256:1ecbff7688f044b1601be70113e2fb1835367ee0b28ffa8f3adf3a05418c5c87"}, + {file = "numba-0.63.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2819cd52afa5d8d04e057bdfd54367575105f8829350d8fb5e4066fb7591cc71"}, + {file = "numba-0.63.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5cfd45dbd3d409e713b1ccfdc2ee72ca82006860254429f4ef01867fdba5845f"}, + {file = "numba-0.63.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:69a599df6976c03b7ecf15d05302696f79f7e6d10d620367407517943355bcb0"}, + {file = "numba-0.63.1-cp312-cp312-win_amd64.whl", hash = "sha256:bbad8c63e4fc7eb3cdb2c2da52178e180419f7969f9a685f283b313a70b92af3"}, + {file = 
"numba-0.63.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:0bd4fd820ef7442dcc07da184c3f54bb41d2bdb7b35bacf3448e73d081f730dc"}, + {file = "numba-0.63.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:53de693abe4be3bd4dee38e1c55f01c55ff644a6a3696a3670589e6e4c39cde2"}, + {file = "numba-0.63.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:81227821a72a763c3d4ac290abbb4371d855b59fdf85d5af22a47c0e86bf8c7e"}, + {file = "numba-0.63.1-cp313-cp313-win_amd64.whl", hash = "sha256:eb227b07c2ac37b09432a9bda5142047a2d1055646e089d4a240a2643e508102"}, + {file = "numba-0.63.1-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:f180883e5508940cc83de8a8bea37fc6dd20fbe4e5558d4659b8b9bef5ff4731"}, + {file = "numba-0.63.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0938764afa82a47c0e895637a6c55547a42c9e1d35cac42285b1fa60a8b02bb"}, + {file = "numba-0.63.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f90a929fa5094e062d4e0368ede1f4497d5e40f800e80aa5222c4734236a2894"}, + {file = "numba-0.63.1-cp314-cp314-win_amd64.whl", hash = "sha256:8d6d5ce85f572ed4e1a135dbb8c0114538f9dd0e3657eeb0bb64ab204cbe2a8f"}, + {file = "numba-0.63.1.tar.gz", hash = "sha256:b320aa675d0e3b17b40364935ea52a7b1c670c9037c39cf92c49502a75902f4b"}, +] + +[package.dependencies] +llvmlite = "==0.46.*" +numpy = ">=1.22,<2.4" + [[package]] name = "numexpr" version = "2.14.1" @@ -1542,179 +2211,183 @@ numpy = ">=1.23.0" [[package]] name = "numpy" -version = "2.3.4" +version = "2.3.5" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.11" -groups = ["main"] +groups = ["main", "vectorbt"] files = [ - {file = "numpy-2.3.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e78aecd2800b32e8347ce49316d3eaf04aed849cd5b38e0af39f829a4e59f5eb"}, - {file = "numpy-2.3.4-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:7fd09cc5d65bda1e79432859c40978010622112e9194e581e3415a3eccc7f43f"}, - {file = "numpy-2.3.4-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:1b219560ae2c1de48ead517d085bc2d05b9433f8e49d0955c82e8cd37bd7bf36"}, - {file = "numpy-2.3.4-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:bafa7d87d4c99752d07815ed7a2c0964f8ab311eb8168f41b910bd01d15b6032"}, - {file = "numpy-2.3.4-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36dc13af226aeab72b7abad501d370d606326a0029b9f435eacb3b8c94b8a8b7"}, - {file = "numpy-2.3.4-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a7b2f9a18b5ff9824a6af80de4f37f4ec3c2aab05ef08f51c77a093f5b89adda"}, - {file = "numpy-2.3.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9984bd645a8db6ca15d850ff996856d8762c51a2239225288f08f9050ca240a0"}, - {file = "numpy-2.3.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:64c5825affc76942973a70acf438a8ab618dbd692b84cd5ec40a0a0509edc09a"}, - {file = "numpy-2.3.4-cp311-cp311-win32.whl", hash = "sha256:ed759bf7a70342f7817d88376eb7142fab9fef8320d6019ef87fae05a99874e1"}, - {file = "numpy-2.3.4-cp311-cp311-win_amd64.whl", hash = "sha256:faba246fb30ea2a526c2e9645f61612341de1a83fb1e0c5edf4ddda5a9c10996"}, - {file = "numpy-2.3.4-cp311-cp311-win_arm64.whl", hash = "sha256:4c01835e718bcebe80394fd0ac66c07cbb90147ebbdad3dcecd3f25de2ae7e2c"}, - {file = "numpy-2.3.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ef1b5a3e808bc40827b5fa2c8196151a4c5abe110e1726949d7abddfe5c7ae11"}, - {file = "numpy-2.3.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c2f91f496a87235c6aaf6d3f3d89b17dba64996abadccb289f48456cff931ca9"}, - {file = "numpy-2.3.4-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:f77e5b3d3da652b474cc80a14084927a5e86a5eccf54ca8ca5cbd697bf7f2667"}, - {file = "numpy-2.3.4-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:8ab1c5f5ee40d6e01cbe96de5863e39b215a4d24e7d007cad56c7184fdf4aeef"}, - {file = 
"numpy-2.3.4-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:77b84453f3adcb994ddbd0d1c5d11db2d6bda1a2b7fd5ac5bd4649d6f5dc682e"}, - {file = "numpy-2.3.4-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4121c5beb58a7f9e6dfdee612cb24f4df5cd4db6e8261d7f4d7450a997a65d6a"}, - {file = "numpy-2.3.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:65611ecbb00ac9846efe04db15cbe6186f562f6bb7e5e05f077e53a599225d16"}, - {file = "numpy-2.3.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dabc42f9c6577bcc13001b8810d300fe814b4cfbe8a92c873f269484594f9786"}, - {file = "numpy-2.3.4-cp312-cp312-win32.whl", hash = "sha256:a49d797192a8d950ca59ee2d0337a4d804f713bb5c3c50e8db26d49666e351dc"}, - {file = "numpy-2.3.4-cp312-cp312-win_amd64.whl", hash = "sha256:985f1e46358f06c2a09921e8921e2c98168ed4ae12ccd6e5e87a4f1857923f32"}, - {file = "numpy-2.3.4-cp312-cp312-win_arm64.whl", hash = "sha256:4635239814149e06e2cb9db3dd584b2fa64316c96f10656983b8026a82e6e4db"}, - {file = "numpy-2.3.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c090d4860032b857d94144d1a9976b8e36709e40386db289aaf6672de2a81966"}, - {file = "numpy-2.3.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a13fc473b6db0be619e45f11f9e81260f7302f8d180c49a22b6e6120022596b3"}, - {file = "numpy-2.3.4-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:3634093d0b428e6c32c3a69b78e554f0cd20ee420dcad5a9f3b2a63762ce4197"}, - {file = "numpy-2.3.4-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:043885b4f7e6e232d7df4f51ffdef8c36320ee9d5f227b380ea636722c7ed12e"}, - {file = "numpy-2.3.4-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4ee6a571d1e4f0ea6d5f22d6e5fbd6ed1dc2b18542848e1e7301bd190500c9d7"}, - {file = "numpy-2.3.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fc8a63918b04b8571789688b2780ab2b4a33ab44bfe8ccea36d3eba51228c953"}, - {file = "numpy-2.3.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:40cc556d5abbc54aabe2b1ae287042d7bdb80c08edede19f0c0afb36ae586f37"}, - {file = "numpy-2.3.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ecb63014bb7f4ce653f8be7f1df8cbc6093a5a2811211770f6606cc92b5a78fd"}, - {file = "numpy-2.3.4-cp313-cp313-win32.whl", hash = "sha256:e8370eb6925bb8c1c4264fec52b0384b44f675f191df91cbe0140ec9f0955646"}, - {file = "numpy-2.3.4-cp313-cp313-win_amd64.whl", hash = "sha256:56209416e81a7893036eea03abcb91c130643eb14233b2515c90dcac963fe99d"}, - {file = "numpy-2.3.4-cp313-cp313-win_arm64.whl", hash = "sha256:a700a4031bc0fd6936e78a752eefb79092cecad2599ea9c8039c548bc097f9bc"}, - {file = "numpy-2.3.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:86966db35c4040fdca64f0816a1c1dd8dbd027d90fca5a57e00e1ca4cd41b879"}, - {file = "numpy-2.3.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:838f045478638b26c375ee96ea89464d38428c69170360b23a1a50fa4baa3562"}, - {file = "numpy-2.3.4-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d7315ed1dab0286adca467377c8381cd748f3dc92235f22a7dfc42745644a96a"}, - {file = "numpy-2.3.4-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:84f01a4d18b2cc4ade1814a08e5f3c907b079c847051d720fad15ce37aa930b6"}, - {file = "numpy-2.3.4-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:817e719a868f0dacde4abdfc5c1910b301877970195db9ab6a5e2c4bd5b121f7"}, - {file = "numpy-2.3.4-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85e071da78d92a214212cacea81c6da557cab307f2c34b5f85b628e94803f9c0"}, - {file = "numpy-2.3.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2ec646892819370cf3558f518797f16597b4e4669894a2ba712caccc9da53f1f"}, - {file = "numpy-2.3.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:035796aaaddfe2f9664b9a9372f089cfc88bd795a67bd1bfe15e6e770934cf64"}, - {file = "numpy-2.3.4-cp313-cp313t-win32.whl", hash = "sha256:fea80f4f4cf83b54c3a051f2f727870ee51e22f0248d3114b8e755d160b38cfb"}, - {file = 
"numpy-2.3.4-cp313-cp313t-win_amd64.whl", hash = "sha256:15eea9f306b98e0be91eb344a94c0e630689ef302e10c2ce5f7e11905c704f9c"}, - {file = "numpy-2.3.4-cp313-cp313t-win_arm64.whl", hash = "sha256:b6c231c9c2fadbae4011ca5e7e83e12dc4a5072f1a1d85a0a7b3ed754d145a40"}, - {file = "numpy-2.3.4-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:81c3e6d8c97295a7360d367f9f8553973651b76907988bb6066376bc2252f24e"}, - {file = "numpy-2.3.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:7c26b0b2bf58009ed1f38a641f3db4be8d960a417ca96d14e5b06df1506d41ff"}, - {file = "numpy-2.3.4-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:62b2198c438058a20b6704351b35a1d7db881812d8512d67a69c9de1f18ca05f"}, - {file = "numpy-2.3.4-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:9d729d60f8d53a7361707f4b68a9663c968882dd4f09e0d58c044c8bf5faee7b"}, - {file = "numpy-2.3.4-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bd0c630cf256b0a7fd9d0a11c9413b42fef5101219ce6ed5a09624f5a65392c7"}, - {file = "numpy-2.3.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d5e081bc082825f8b139f9e9fe42942cb4054524598aaeb177ff476cc76d09d2"}, - {file = "numpy-2.3.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:15fb27364ed84114438fff8aaf998c9e19adbeba08c0b75409f8c452a8692c52"}, - {file = "numpy-2.3.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:85d9fb2d8cd998c84d13a79a09cc0c1091648e848e4e6249b0ccd7f6b487fa26"}, - {file = "numpy-2.3.4-cp314-cp314-win32.whl", hash = "sha256:e73d63fd04e3a9d6bc187f5455d81abfad05660b212c8804bf3b407e984cd2bc"}, - {file = "numpy-2.3.4-cp314-cp314-win_amd64.whl", hash = "sha256:3da3491cee49cf16157e70f607c03a217ea6647b1cea4819c4f48e53d49139b9"}, - {file = "numpy-2.3.4-cp314-cp314-win_arm64.whl", hash = "sha256:6d9cd732068e8288dbe2717177320723ccec4fb064123f0caf9bbd90ab5be868"}, - {file = "numpy-2.3.4-cp314-cp314t-macosx_10_15_x86_64.whl", hash = 
"sha256:22758999b256b595cf0b1d102b133bb61866ba5ceecf15f759623b64c020c9ec"}, - {file = "numpy-2.3.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:9cb177bc55b010b19798dc5497d540dea67fd13a8d9e882b2dae71de0cf09eb3"}, - {file = "numpy-2.3.4-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:0f2bcc76f1e05e5ab58893407c63d90b2029908fa41f9f1cc51eecce936c3365"}, - {file = "numpy-2.3.4-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:8dc20bde86802df2ed8397a08d793da0ad7a5fd4ea3ac85d757bf5dd4ad7c252"}, - {file = "numpy-2.3.4-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5e199c087e2aa71c8f9ce1cb7a8e10677dc12457e7cc1be4798632da37c3e86e"}, - {file = "numpy-2.3.4-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85597b2d25ddf655495e2363fe044b0ae999b75bc4d630dc0d886484b03a5eb0"}, - {file = "numpy-2.3.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:04a69abe45b49c5955923cf2c407843d1c85013b424ae8a560bba16c92fe44a0"}, - {file = "numpy-2.3.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e1708fac43ef8b419c975926ce1eaf793b0c13b7356cfab6ab0dc34c0a02ac0f"}, - {file = "numpy-2.3.4-cp314-cp314t-win32.whl", hash = "sha256:863e3b5f4d9915aaf1b8ec79ae560ad21f0b8d5e3adc31e73126491bb86dee1d"}, - {file = "numpy-2.3.4-cp314-cp314t-win_amd64.whl", hash = "sha256:962064de37b9aef801d33bc579690f8bfe6c5e70e29b61783f60bcba838a14d6"}, - {file = "numpy-2.3.4-cp314-cp314t-win_arm64.whl", hash = "sha256:8b5a9a39c45d852b62693d9b3f3e0fe052541f804296ff401a72a1b60edafb29"}, - {file = "numpy-2.3.4-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:6e274603039f924c0fe5cb73438fa9246699c78a6df1bd3decef9ae592ae1c05"}, - {file = "numpy-2.3.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d149aee5c72176d9ddbc6803aef9c0f6d2ceeea7626574fc68518da5476fa346"}, - {file = "numpy-2.3.4-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:6d34ed9db9e6395bb6cd33286035f73a59b058169733a9db9f85e650b88df37e"}, - {file = 
"numpy-2.3.4-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:fdebe771ca06bb8d6abce84e51dca9f7921fe6ad34a0c914541b063e9a68928b"}, - {file = "numpy-2.3.4-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:957e92defe6c08211eb77902253b14fe5b480ebc5112bc741fd5e9cd0608f847"}, - {file = "numpy-2.3.4-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13b9062e4f5c7ee5c7e5be96f29ba71bc5a37fed3d1d77c37390ae00724d296d"}, - {file = "numpy-2.3.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:81b3a59793523e552c4a96109dde028aa4448ae06ccac5a76ff6532a85558a7f"}, - {file = "numpy-2.3.4.tar.gz", hash = "sha256:a7d018bfedb375a8d979ac758b120ba846a7fe764911a64465fd87b8729f4a6a"}, + {file = "numpy-2.3.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:de5672f4a7b200c15a4127042170a694d4df43c992948f5e1af57f0174beed10"}, + {file = "numpy-2.3.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:acfd89508504a19ed06ef963ad544ec6664518c863436306153e13e94605c218"}, + {file = "numpy-2.3.5-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:ffe22d2b05504f786c867c8395de703937f934272eb67586817b46188b4ded6d"}, + {file = "numpy-2.3.5-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:872a5cf366aec6bb1147336480fef14c9164b154aeb6542327de4970282cd2f5"}, + {file = "numpy-2.3.5-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3095bdb8dd297e5920b010e96134ed91d852d81d490e787beca7e35ae1d89cf7"}, + {file = "numpy-2.3.5-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8cba086a43d54ca804ce711b2a940b16e452807acebe7852ff327f1ecd49b0d4"}, + {file = "numpy-2.3.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6cf9b429b21df6b99f4dee7a1218b8b7ffbbe7df8764dc0bd60ce8a0708fed1e"}, + {file = "numpy-2.3.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:396084a36abdb603546b119d96528c2f6263921c50df3c8fd7cb28873a237748"}, + {file = "numpy-2.3.5-cp311-cp311-win32.whl", hash 
= "sha256:b0c7088a73aef3d687c4deef8452a3ac7c1be4e29ed8bf3b366c8111128ac60c"}, + {file = "numpy-2.3.5-cp311-cp311-win_amd64.whl", hash = "sha256:a414504bef8945eae5f2d7cb7be2d4af77c5d1cb5e20b296c2c25b61dff2900c"}, + {file = "numpy-2.3.5-cp311-cp311-win_arm64.whl", hash = "sha256:0cd00b7b36e35398fa2d16af7b907b65304ef8bb4817a550e06e5012929830fa"}, + {file = "numpy-2.3.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:74ae7b798248fe62021dbf3c914245ad45d1a6b0cb4a29ecb4b31d0bfbc4cc3e"}, + {file = "numpy-2.3.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ee3888d9ff7c14604052b2ca5535a30216aa0a58e948cdd3eeb8d3415f638769"}, + {file = "numpy-2.3.5-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:612a95a17655e213502f60cfb9bf9408efdc9eb1d5f50535cc6eb365d11b42b5"}, + {file = "numpy-2.3.5-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:3101e5177d114a593d79dd79658650fe28b5a0d8abeb8ce6f437c0e6df5be1a4"}, + {file = "numpy-2.3.5-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b973c57ff8e184109db042c842423ff4f60446239bd585a5131cc47f06f789d"}, + {file = "numpy-2.3.5-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0d8163f43acde9a73c2a33605353a4f1bc4798745a8b1d73183b28e5b435ae28"}, + {file = "numpy-2.3.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:51c1e14eb1e154ebd80e860722f9e6ed6ec89714ad2db2d3aa33c31d7c12179b"}, + {file = "numpy-2.3.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b46b4ec24f7293f23adcd2d146960559aaf8020213de8ad1909dba6c013bf89c"}, + {file = "numpy-2.3.5-cp312-cp312-win32.whl", hash = "sha256:3997b5b3c9a771e157f9aae01dd579ee35ad7109be18db0e85dbdbe1de06e952"}, + {file = "numpy-2.3.5-cp312-cp312-win_amd64.whl", hash = "sha256:86945f2ee6d10cdfd67bcb4069c1662dd711f7e2a4343db5cecec06b87cf31aa"}, + {file = "numpy-2.3.5-cp312-cp312-win_arm64.whl", hash = "sha256:f28620fe26bee16243be2b7b874da327312240a7cdc38b769a697578d2100013"}, + {file = 
"numpy-2.3.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d0f23b44f57077c1ede8c5f26b30f706498b4862d3ff0a7298b8411dd2f043ff"}, + {file = "numpy-2.3.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:aa5bc7c5d59d831d9773d1170acac7893ce3a5e130540605770ade83280e7188"}, + {file = "numpy-2.3.5-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:ccc933afd4d20aad3c00bcef049cb40049f7f196e0397f1109dba6fed63267b0"}, + {file = "numpy-2.3.5-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:afaffc4393205524af9dfa400fa250143a6c3bc646c08c9f5e25a9f4b4d6a903"}, + {file = "numpy-2.3.5-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c75442b2209b8470d6d5d8b1c25714270686f14c749028d2199c54e29f20b4d"}, + {file = "numpy-2.3.5-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11e06aa0af8c0f05104d56450d6093ee639e15f24ecf62d417329d06e522e017"}, + {file = "numpy-2.3.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ed89927b86296067b4f81f108a2271d8926467a8868e554eaf370fc27fa3ccaf"}, + {file = "numpy-2.3.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:51c55fe3451421f3a6ef9a9c1439e82101c57a2c9eab9feb196a62b1a10b58ce"}, + {file = "numpy-2.3.5-cp313-cp313-win32.whl", hash = "sha256:1978155dd49972084bd6ef388d66ab70f0c323ddee6f693d539376498720fb7e"}, + {file = "numpy-2.3.5-cp313-cp313-win_amd64.whl", hash = "sha256:00dc4e846108a382c5869e77c6ed514394bdeb3403461d25a829711041217d5b"}, + {file = "numpy-2.3.5-cp313-cp313-win_arm64.whl", hash = "sha256:0472f11f6ec23a74a906a00b48a4dcf3849209696dff7c189714511268d103ae"}, + {file = "numpy-2.3.5-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:414802f3b97f3c1eef41e530aaba3b3c1620649871d8cb38c6eaff034c2e16bd"}, + {file = "numpy-2.3.5-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5ee6609ac3604fa7780e30a03e5e241a7956f8e2fcfe547d51e3afa5247ac47f"}, + {file = "numpy-2.3.5-cp313-cp313t-macosx_14_0_arm64.whl", hash = 
"sha256:86d835afea1eaa143012a2d7a3f45a3adce2d7adc8b4961f0b362214d800846a"}, + {file = "numpy-2.3.5-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:30bc11310e8153ca664b14c5f1b73e94bd0503681fcf136a163de856f3a50139"}, + {file = "numpy-2.3.5-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1062fde1dcf469571705945b0f221b73928f34a20c904ffb45db101907c3454e"}, + {file = "numpy-2.3.5-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ce581db493ea1a96c0556360ede6607496e8bf9b3a8efa66e06477267bc831e9"}, + {file = "numpy-2.3.5-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:cc8920d2ec5fa99875b670bb86ddeb21e295cb07aa331810d9e486e0b969d946"}, + {file = "numpy-2.3.5-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:9ee2197ef8c4f0dfe405d835f3b6a14f5fee7782b5de51ba06fb65fc9b36e9f1"}, + {file = "numpy-2.3.5-cp313-cp313t-win32.whl", hash = "sha256:70b37199913c1bd300ff6e2693316c6f869c7ee16378faf10e4f5e3275b299c3"}, + {file = "numpy-2.3.5-cp313-cp313t-win_amd64.whl", hash = "sha256:b501b5fa195cc9e24fe102f21ec0a44dffc231d2af79950b451e0d99cea02234"}, + {file = "numpy-2.3.5-cp313-cp313t-win_arm64.whl", hash = "sha256:a80afd79f45f3c4a7d341f13acbe058d1ca8ac017c165d3fa0d3de6bc1a079d7"}, + {file = "numpy-2.3.5-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:bf06bc2af43fa8d32d30fae16ad965663e966b1a3202ed407b84c989c3221e82"}, + {file = "numpy-2.3.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:052e8c42e0c49d2575621c158934920524f6c5da05a1d3b9bab5d8e259e045f0"}, + {file = "numpy-2.3.5-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:1ed1ec893cff7040a02c8aa1c8611b94d395590d553f6b53629a4461dc7f7b63"}, + {file = "numpy-2.3.5-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:2dcd0808a421a482a080f89859a18beb0b3d1e905b81e617a188bd80422d62e9"}, + {file = "numpy-2.3.5-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:727fd05b57df37dc0bcf1a27767a3d9a78cbbc92822445f32cc3436ba797337b"}, + 
{file = "numpy-2.3.5-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fffe29a1ef00883599d1dc2c51aa2e5d80afe49523c261a74933df395c15c520"}, + {file = "numpy-2.3.5-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8f7f0e05112916223d3f438f293abf0727e1181b5983f413dfa2fefc4098245c"}, + {file = "numpy-2.3.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2e2eb32ddb9ccb817d620ac1d8dae7c3f641c1e5f55f531a33e8ab97960a75b8"}, + {file = "numpy-2.3.5-cp314-cp314-win32.whl", hash = "sha256:66f85ce62c70b843bab1fb14a05d5737741e74e28c7b8b5a064de10142fad248"}, + {file = "numpy-2.3.5-cp314-cp314-win_amd64.whl", hash = "sha256:e6a0bc88393d65807d751a614207b7129a310ca4fe76a74e5c7da5fa5671417e"}, + {file = "numpy-2.3.5-cp314-cp314-win_arm64.whl", hash = "sha256:aeffcab3d4b43712bb7a60b65f6044d444e75e563ff6180af8f98dd4b905dfd2"}, + {file = "numpy-2.3.5-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:17531366a2e3a9e30762c000f2c43a9aaa05728712e25c11ce1dbe700c53ad41"}, + {file = "numpy-2.3.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:d21644de1b609825ede2f48be98dfde4656aefc713654eeee280e37cadc4e0ad"}, + {file = "numpy-2.3.5-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:c804e3a5aba5460c73955c955bdbd5c08c354954e9270a2c1565f62e866bdc39"}, + {file = "numpy-2.3.5-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:cc0a57f895b96ec78969c34f682c602bf8da1a0270b09bc65673df2e7638ec20"}, + {file = "numpy-2.3.5-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:900218e456384ea676e24ea6a0417f030a3b07306d29d7ad843957b40a9d8d52"}, + {file = "numpy-2.3.5-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:09a1bea522b25109bf8e6f3027bd810f7c1085c64a0c7ce050c1676ad0ba010b"}, + {file = "numpy-2.3.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:04822c00b5fd0323c8166d66c701dc31b7fbd252c100acd708c48f763968d6a3"}, + {file = "numpy-2.3.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = 
"sha256:d6889ec4ec662a1a37eb4b4fb26b6100841804dac55bd9df579e326cdc146227"}, + {file = "numpy-2.3.5-cp314-cp314t-win32.whl", hash = "sha256:93eebbcf1aafdf7e2ddd44c2923e2672e1010bddc014138b229e49725b4d6be5"}, + {file = "numpy-2.3.5-cp314-cp314t-win_amd64.whl", hash = "sha256:c8a9958e88b65c3b27e22ca2a076311636850b612d6bbfb76e8d156aacde2aaf"}, + {file = "numpy-2.3.5-cp314-cp314t-win_arm64.whl", hash = "sha256:6203fdf9f3dc5bdaed7319ad8698e685c7a3be10819f41d32a0723e611733b42"}, + {file = "numpy-2.3.5-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:f0963b55cdd70fad460fa4c1341f12f976bb26cb66021a5580329bd498988310"}, + {file = "numpy-2.3.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:f4255143f5160d0de972d28c8f9665d882b5f61309d8362fdd3e103cf7bf010c"}, + {file = "numpy-2.3.5-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:a4b9159734b326535f4dd01d947f919c6eefd2d9827466a696c44ced82dfbc18"}, + {file = "numpy-2.3.5-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:2feae0d2c91d46e59fcd62784a3a83b3fb677fead592ce51b5a6fbb4f95965ff"}, + {file = "numpy-2.3.5-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ffac52f28a7849ad7576293c0cb7b9f08304e8f7d738a8cb8a90ec4c55a998eb"}, + {file = "numpy-2.3.5-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63c0e9e7eea69588479ebf4a8a270d5ac22763cc5854e9a7eae952a3908103f7"}, + {file = "numpy-2.3.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:f16417ec91f12f814b10bafe79ef77e70113a2f5f7018640e7425ff979253425"}, + {file = "numpy-2.3.5.tar.gz", hash = "sha256:784db1dcdab56bf0517743e746dfb0f885fc68d948aba86eeec2cba234bdf1c0"}, ] [[package]] name = "orjson" -version = "3.11.3" +version = "3.11.5" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = 
"orjson-3.11.3-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:29cb1f1b008d936803e2da3d7cba726fc47232c45df531b29edf0b232dd737e7"}, - {file = "orjson-3.11.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97dceed87ed9139884a55db8722428e27bd8452817fbf1869c58b49fecab1120"}, - {file = "orjson-3.11.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:58533f9e8266cb0ac298e259ed7b4d42ed3fa0b78ce76860626164de49e0d467"}, - {file = "orjson-3.11.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c212cfdd90512fe722fa9bd620de4d46cda691415be86b2e02243242ae81873"}, - {file = "orjson-3.11.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff835b5d3e67d9207343effb03760c00335f8b5285bfceefd4dc967b0e48f6a"}, - {file = "orjson-3.11.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f5aa4682912a450c2db89cbd92d356fef47e115dffba07992555542f344d301b"}, - {file = "orjson-3.11.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7d18dd34ea2e860553a579df02041845dee0af8985dff7f8661306f95504ddf"}, - {file = "orjson-3.11.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d8b11701bc43be92ea42bd454910437b355dfb63696c06fe953ffb40b5f763b4"}, - {file = "orjson-3.11.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:90368277087d4af32d38bd55f9da2ff466d25325bf6167c8f382d8ee40cb2bbc"}, - {file = "orjson-3.11.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fd7ff459fb393358d3a155d25b275c60b07a2c83dcd7ea962b1923f5a1134569"}, - {file = "orjson-3.11.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f8d902867b699bcd09c176a280b1acdab57f924489033e53d0afe79817da37e6"}, - {file = "orjson-3.11.3-cp310-cp310-win32.whl", hash = "sha256:bb93562146120bb51e6b154962d3dadc678ed0fce96513fa6bc06599bb6f6edc"}, - {file = "orjson-3.11.3-cp310-cp310-win_amd64.whl", hash = 
"sha256:976c6f1975032cc327161c65d4194c549f2589d88b105a5e3499429a54479770"}, - {file = "orjson-3.11.3-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9d2ae0cc6aeb669633e0124531f342a17d8e97ea999e42f12a5ad4adaa304c5f"}, - {file = "orjson-3.11.3-cp311-cp311-macosx_15_0_arm64.whl", hash = "sha256:ba21dbb2493e9c653eaffdc38819b004b7b1b246fb77bfc93dc016fe664eac91"}, - {file = "orjson-3.11.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00f1a271e56d511d1569937c0447d7dce5a99a33ea0dec76673706360a051904"}, - {file = "orjson-3.11.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b67e71e47caa6680d1b6f075a396d04fa6ca8ca09aafb428731da9b3ea32a5a6"}, - {file = "orjson-3.11.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d7d012ebddffcce8c85734a6d9e5f08180cd3857c5f5a3ac70185b43775d043d"}, - {file = "orjson-3.11.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd759f75d6b8d1b62012b7f5ef9461d03c804f94d539a5515b454ba3a6588038"}, - {file = "orjson-3.11.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6890ace0809627b0dff19cfad92d69d0fa3f089d3e359a2a532507bb6ba34efb"}, - {file = "orjson-3.11.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9d4a5e041ae435b815e568537755773d05dac031fee6a57b4ba70897a44d9d2"}, - {file = "orjson-3.11.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2d68bf97a771836687107abfca089743885fb664b90138d8761cce61d5625d55"}, - {file = "orjson-3.11.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:bfc27516ec46f4520b18ef645864cee168d2a027dbf32c5537cb1f3e3c22dac1"}, - {file = "orjson-3.11.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f66b001332a017d7945e177e282a40b6997056394e3ed7ddb41fb1813b83e824"}, - {file = "orjson-3.11.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:212e67806525d2561efbfe9e799633b17eb668b8964abed6b5319b2f1cfbae1f"}, - {file = "orjson-3.11.3-cp311-cp311-win32.whl", hash = "sha256:6e8e0c3b85575a32f2ffa59de455f85ce002b8bdc0662d6b9c2ed6d80ab5d204"}, - {file = "orjson-3.11.3-cp311-cp311-win_amd64.whl", hash = "sha256:6be2f1b5d3dc99a5ce5ce162fc741c22ba9f3443d3dd586e6a1211b7bc87bc7b"}, - {file = "orjson-3.11.3-cp311-cp311-win_arm64.whl", hash = "sha256:fafb1a99d740523d964b15c8db4eabbfc86ff29f84898262bf6e3e4c9e97e43e"}, - {file = "orjson-3.11.3-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8c752089db84333e36d754c4baf19c0e1437012242048439c7e80eb0e6426e3b"}, - {file = "orjson-3.11.3-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:9b8761b6cf04a856eb544acdd82fc594b978f12ac3602d6374a7edb9d86fd2c2"}, - {file = "orjson-3.11.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b13974dc8ac6ba22feaa867fc19135a3e01a134b4f7c9c28162fed4d615008a"}, - {file = "orjson-3.11.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f83abab5bacb76d9c821fd5c07728ff224ed0e52d7a71b7b3de822f3df04e15c"}, - {file = "orjson-3.11.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e6fbaf48a744b94091a56c62897b27c31ee2da93d826aa5b207131a1e13d4064"}, - {file = "orjson-3.11.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc779b4f4bba2847d0d2940081a7b6f7b5877e05408ffbb74fa1faf4a136c424"}, - {file = "orjson-3.11.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd4b909ce4c50faa2192da6bb684d9848d4510b736b0611b6ab4020ea6fd2d23"}, - {file = "orjson-3.11.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:524b765ad888dc5518bbce12c77c2e83dee1ed6b0992c1790cc5fb49bb4b6667"}, - {file = "orjson-3.11.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:84fd82870b97ae3cdcea9d8746e592b6d40e1e4d4527835fc520c588d2ded04f"}, - {file = 
"orjson-3.11.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fbecb9709111be913ae6879b07bafd4b0785b44c1eb5cac8ac76da048b3885a1"}, - {file = "orjson-3.11.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9dba358d55aee552bd868de348f4736ca5a4086d9a62e2bfbbeeb5629fe8b0cc"}, - {file = "orjson-3.11.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eabcf2e84f1d7105f84580e03012270c7e97ecb1fb1618bda395061b2a84a049"}, - {file = "orjson-3.11.3-cp312-cp312-win32.whl", hash = "sha256:3782d2c60b8116772aea8d9b7905221437fdf53e7277282e8d8b07c220f96cca"}, - {file = "orjson-3.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:79b44319268af2eaa3e315b92298de9a0067ade6e6003ddaef72f8e0bedb94f1"}, - {file = "orjson-3.11.3-cp312-cp312-win_arm64.whl", hash = "sha256:0e92a4e83341ef79d835ca21b8bd13e27c859e4e9e4d7b63defc6e58462a3710"}, - {file = "orjson-3.11.3-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:af40c6612fd2a4b00de648aa26d18186cd1322330bd3a3cc52f87c699e995810"}, - {file = "orjson-3.11.3-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:9f1587f26c235894c09e8b5b7636a38091a9e6e7fe4531937534749c04face43"}, - {file = "orjson-3.11.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:61dcdad16da5bb486d7227a37a2e789c429397793a6955227cedbd7252eb5a27"}, - {file = "orjson-3.11.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:11c6d71478e2cbea0a709e8a06365fa63da81da6498a53e4c4f065881d21ae8f"}, - {file = "orjson-3.11.3-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff94112e0098470b665cb0ed06efb187154b63649403b8d5e9aedeb482b4548c"}, - {file = "orjson-3.11.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae8b756575aaa2a855a75192f356bbda11a89169830e1439cfb1a3e1a6dde7be"}, - {file = "orjson-3.11.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:c9416cc19a349c167ef76135b2fe40d03cea93680428efee8771f3e9fb66079d"}, - {file = "orjson-3.11.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b822caf5b9752bc6f246eb08124c3d12bf2175b66ab74bac2ef3bbf9221ce1b2"}, - {file = "orjson-3.11.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:414f71e3bdd5573893bf5ecdf35c32b213ed20aa15536fe2f588f946c318824f"}, - {file = "orjson-3.11.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:828e3149ad8815dc14468f36ab2a4b819237c155ee1370341b91ea4c8672d2ee"}, - {file = "orjson-3.11.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ac9e05f25627ffc714c21f8dfe3a579445a5c392a9c8ae7ba1d0e9fb5333f56e"}, - {file = "orjson-3.11.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e44fbe4000bd321d9f3b648ae46e0196d21577cf66ae684a96ff90b1f7c93633"}, - {file = "orjson-3.11.3-cp313-cp313-win32.whl", hash = "sha256:2039b7847ba3eec1f5886e75e6763a16e18c68a63efc4b029ddf994821e2e66b"}, - {file = "orjson-3.11.3-cp313-cp313-win_amd64.whl", hash = "sha256:29be5ac4164aa8bdcba5fa0700a3c9c316b411d8ed9d39ef8a882541bd452fae"}, - {file = "orjson-3.11.3-cp313-cp313-win_arm64.whl", hash = "sha256:18bd1435cb1f2857ceb59cfb7de6f92593ef7b831ccd1b9bfb28ca530e539dce"}, - {file = "orjson-3.11.3-cp314-cp314-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:cf4b81227ec86935568c7edd78352a92e97af8da7bd70bdfdaa0d2e0011a1ab4"}, - {file = "orjson-3.11.3-cp314-cp314-macosx_15_0_arm64.whl", hash = "sha256:bc8bc85b81b6ac9fc4dae393a8c159b817f4c2c9dee5d12b773bddb3b95fc07e"}, - {file = "orjson-3.11.3-cp314-cp314-manylinux_2_34_aarch64.whl", hash = "sha256:88dcfc514cfd1b0de038443c7b3e6a9797ffb1b3674ef1fd14f701a13397f82d"}, - {file = "orjson-3.11.3-cp314-cp314-manylinux_2_34_x86_64.whl", hash = "sha256:d61cd543d69715d5fc0a690c7c6f8dcc307bc23abef9738957981885f5f38229"}, - {file = "orjson-3.11.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = 
"sha256:2b7b153ed90ababadbef5c3eb39549f9476890d339cf47af563aea7e07db2451"}, - {file = "orjson-3.11.3-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:7909ae2460f5f494fecbcd10613beafe40381fd0316e35d6acb5f3a05bfda167"}, - {file = "orjson-3.11.3-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:2030c01cbf77bc67bee7eef1e7e31ecf28649353987775e3583062c752da0077"}, - {file = "orjson-3.11.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:a0169ebd1cbd94b26c7a7ad282cf5c2744fce054133f959e02eb5265deae1872"}, - {file = "orjson-3.11.3-cp314-cp314-win32.whl", hash = "sha256:0c6d7328c200c349e3a4c6d8c83e0a5ad029bdc2d417f234152bf34842d0fc8d"}, - {file = "orjson-3.11.3-cp314-cp314-win_amd64.whl", hash = "sha256:317bbe2c069bbc757b1a2e4105b64aacd3bc78279b66a6b9e51e846e4809f804"}, - {file = "orjson-3.11.3-cp314-cp314-win_arm64.whl", hash = "sha256:e8f6a7a27d7b7bec81bd5924163e9af03d49bbb63013f107b48eb5d16db711bc"}, - {file = "orjson-3.11.3-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:56afaf1e9b02302ba636151cfc49929c1bb66b98794291afd0e5f20fecaf757c"}, - {file = "orjson-3.11.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:913f629adef31d2d350d41c051ce7e33cf0fd06a5d1cb28d49b1899b23b903aa"}, - {file = "orjson-3.11.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e0a23b41f8f98b4e61150a03f83e4f0d566880fe53519d445a962929a4d21045"}, - {file = "orjson-3.11.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3d721fee37380a44f9d9ce6c701b3960239f4fb3d5ceea7f31cbd43882edaa2f"}, - {file = "orjson-3.11.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73b92a5b69f31b1a58c0c7e31080aeaec49c6e01b9522e71ff38d08f15aa56de"}, - {file = "orjson-3.11.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2489b241c19582b3f1430cc5d732caefc1aaf378d97e7fb95b9e56bed11725f"}, - {file = 
"orjson-3.11.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5189a5dab8b0312eadaf9d58d3049b6a52c454256493a557405e77a3d67ab7f"}, - {file = "orjson-3.11.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9d8787bdfbb65a85ea76d0e96a3b1bed7bf0fbcb16d40408dc1172ad784a49d2"}, - {file = "orjson-3.11.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:8e531abd745f51f8035e207e75e049553a86823d189a51809c078412cefb399a"}, - {file = "orjson-3.11.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8ab962931015f170b97a3dd7bd933399c1bae8ed8ad0fb2a7151a5654b6941c7"}, - {file = "orjson-3.11.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:124d5ba71fee9c9902c4a7baa9425e663f7f0aecf73d31d54fe3dd357d62c1a7"}, - {file = "orjson-3.11.3-cp39-cp39-win32.whl", hash = "sha256:22724d80ee5a815a44fc76274bb7ba2e7464f5564aacb6ecddaa9970a83e3225"}, - {file = "orjson-3.11.3-cp39-cp39-win_amd64.whl", hash = "sha256:215c595c792a87d4407cb72dd5e0f6ee8e694ceeb7f9102b533c5a9bf2a916bb"}, - {file = "orjson-3.11.3.tar.gz", hash = "sha256:1c0603b1d2ffcd43a411d64797a19556ef76958aef1c182f22dc30860152a98a"}, + {file = "orjson-3.11.5-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:df9eadb2a6386d5ea2bfd81309c505e125cfc9ba2b1b99a97e60985b0b3665d1"}, + {file = "orjson-3.11.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ccc70da619744467d8f1f49a8cadae5ec7bbe054e5232d95f92ed8737f8c5870"}, + {file = "orjson-3.11.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:073aab025294c2f6fc0807201c76fdaed86f8fc4be52c440fb78fbb759a1ac09"}, + {file = "orjson-3.11.5-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:835f26fa24ba0bb8c53ae2a9328d1706135b74ec653ed933869b74b6909e63fd"}, + {file = "orjson-3.11.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667c132f1f3651c14522a119e4dd631fad98761fa960c55e8e7430bb2a1ba4ac"}, + {file = 
"orjson-3.11.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:42e8961196af655bb5e63ce6c60d25e8798cd4dfbc04f4203457fa3869322c2e"}, + {file = "orjson-3.11.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75412ca06e20904c19170f8a24486c4e6c7887dea591ba18a1ab572f1300ee9f"}, + {file = "orjson-3.11.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6af8680328c69e15324b5af3ae38abbfcf9cbec37b5346ebfd52339c3d7e8a18"}, + {file = "orjson-3.11.5-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:a86fe4ff4ea523eac8f4b57fdac319faf037d3c1be12405e6a7e86b3fbc4756a"}, + {file = "orjson-3.11.5-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e607b49b1a106ee2086633167033afbd63f76f2999e9236f638b06b112b24ea7"}, + {file = "orjson-3.11.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7339f41c244d0eea251637727f016b3d20050636695bc78345cce9029b189401"}, + {file = "orjson-3.11.5-cp310-cp310-win32.whl", hash = "sha256:8be318da8413cdbbce77b8c5fac8d13f6eb0f0db41b30bb598631412619572e8"}, + {file = "orjson-3.11.5-cp310-cp310-win_amd64.whl", hash = "sha256:b9f86d69ae822cabc2a0f6c099b43e8733dda788405cba2665595b7e8dd8d167"}, + {file = "orjson-3.11.5-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9c8494625ad60a923af6b2b0bd74107146efe9b55099e20d7740d995f338fcd8"}, + {file = "orjson-3.11.5-cp311-cp311-macosx_15_0_arm64.whl", hash = "sha256:7bb2ce0b82bc9fd1168a513ddae7a857994b780b2945a8c51db4ab1c4b751ebc"}, + {file = "orjson-3.11.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67394d3becd50b954c4ecd24ac90b5051ee7c903d167459f93e77fc6f5b4c968"}, + {file = "orjson-3.11.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:298d2451f375e5f17b897794bcc3e7b821c0f32b4788b9bcae47ada24d7f3cf7"}, + {file = "orjson-3.11.5-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:aa5e4244063db8e1d87e0f54c3f7522f14b2dc937e65d5241ef0076a096409fd"}, + {file = "orjson-3.11.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1db2088b490761976c1b2e956d5d4e6409f3732e9d79cfa69f876c5248d1baf9"}, + {file = "orjson-3.11.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c2ed66358f32c24e10ceea518e16eb3549e34f33a9d51f99ce23b0251776a1ef"}, + {file = "orjson-3.11.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2021afda46c1ed64d74b555065dbd4c2558d510d8cec5ea6a53001b3e5e82a9"}, + {file = "orjson-3.11.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b42ffbed9128e547a1647a3e50bc88ab28ae9daa61713962e0d3dd35e820c125"}, + {file = "orjson-3.11.5-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:8d5f16195bb671a5dd3d1dbea758918bada8f6cc27de72bd64adfbd748770814"}, + {file = "orjson-3.11.5-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:c0e5d9f7a0227df2927d343a6e3859bebf9208b427c79bd31949abcc2fa32fa5"}, + {file = "orjson-3.11.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:23d04c4543e78f724c4dfe656b3791b5f98e4c9253e13b2636f1af5d90e4a880"}, + {file = "orjson-3.11.5-cp311-cp311-win32.whl", hash = "sha256:c404603df4865f8e0afe981aa3c4b62b406e6d06049564d58934860b62b7f91d"}, + {file = "orjson-3.11.5-cp311-cp311-win_amd64.whl", hash = "sha256:9645ef655735a74da4990c24ffbd6894828fbfa117bc97c1edd98c282ecb52e1"}, + {file = "orjson-3.11.5-cp311-cp311-win_arm64.whl", hash = "sha256:1cbf2735722623fcdee8e712cbaaab9e372bbcb0c7924ad711b261c2eccf4a5c"}, + {file = "orjson-3.11.5-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:334e5b4bff9ad101237c2d799d9fd45737752929753bf4faf4b207335a416b7d"}, + {file = "orjson-3.11.5-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:ff770589960a86eae279f5d8aa536196ebda8273a2a07db2a54e82b93bc86626"}, + {file = "orjson-3.11.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash 
= "sha256:ed24250e55efbcb0b35bed7caaec8cedf858ab2f9f2201f17b8938c618c8ca6f"}, + {file = "orjson-3.11.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a66d7769e98a08a12a139049aac2f0ca3adae989817f8c43337455fbc7669b85"}, + {file = "orjson-3.11.5-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:86cfc555bfd5794d24c6a1903e558b50644e5e68e6471d66502ce5cb5fdef3f9"}, + {file = "orjson-3.11.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a230065027bc2a025e944f9d4714976a81e7ecfa940923283bca7bbc1f10f626"}, + {file = "orjson-3.11.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b29d36b60e606df01959c4b982729c8845c69d1963f88686608be9ced96dbfaa"}, + {file = "orjson-3.11.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c74099c6b230d4261fdc3169d50efc09abf38ace1a42ea2f9994b1d79153d477"}, + {file = "orjson-3.11.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e697d06ad57dd0c7a737771d470eedc18e68dfdefcdd3b7de7f33dfda5b6212e"}, + {file = "orjson-3.11.5-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:e08ca8a6c851e95aaecc32bc44a5aa75d0ad26af8cdac7c77e4ed93acf3d5b69"}, + {file = "orjson-3.11.5-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e8b5f96c05fce7d0218df3fdfeb962d6b8cfff7e3e20264306b46dd8b217c0f3"}, + {file = "orjson-3.11.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ddbfdb5099b3e6ba6d6ea818f61997bb66de14b411357d24c4612cf1ebad08ca"}, + {file = "orjson-3.11.5-cp312-cp312-win32.whl", hash = "sha256:9172578c4eb09dbfcf1657d43198de59b6cef4054de385365060ed50c458ac98"}, + {file = "orjson-3.11.5-cp312-cp312-win_amd64.whl", hash = "sha256:2b91126e7b470ff2e75746f6f6ee32b9ab67b7a93c8ba1d15d3a0caaf16ec875"}, + {file = "orjson-3.11.5-cp312-cp312-win_arm64.whl", hash = "sha256:acbc5fac7e06777555b0722b8ad5f574739e99ffe99467ed63da98f97f9ca0fe"}, + {file = 
"orjson-3.11.5-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:3b01799262081a4c47c035dd77c1301d40f568f77cc7ec1bb7db5d63b0a01629"}, + {file = "orjson-3.11.5-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:61de247948108484779f57a9f406e4c84d636fa5a59e411e6352484985e8a7c3"}, + {file = "orjson-3.11.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:894aea2e63d4f24a7f04a1908307c738d0dce992e9249e744b8f4e8dd9197f39"}, + {file = "orjson-3.11.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ddc21521598dbe369d83d4d40338e23d4101dad21dae0e79fa20465dbace019f"}, + {file = "orjson-3.11.5-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7cce16ae2f5fb2c53c3eafdd1706cb7b6530a67cc1c17abe8ec747f5cd7c0c51"}, + {file = "orjson-3.11.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e46c762d9f0e1cfb4ccc8515de7f349abbc95b59cb5a2bd68df5973fdef913f8"}, + {file = "orjson-3.11.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d7345c759276b798ccd6d77a87136029e71e66a8bbf2d2755cbdde1d82e78706"}, + {file = "orjson-3.11.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75bc2e59e6a2ac1dd28901d07115abdebc4563b5b07dd612bf64260a201b1c7f"}, + {file = "orjson-3.11.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:54aae9b654554c3b4edd61896b978568c6daa16af96fa4681c9b5babd469f863"}, + {file = "orjson-3.11.5-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:4bdd8d164a871c4ec773f9de0f6fe8769c2d6727879c37a9666ba4183b7f8228"}, + {file = "orjson-3.11.5-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a261fef929bcf98a60713bf5e95ad067cea16ae345d9a35034e73c3990e927d2"}, + {file = "orjson-3.11.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c028a394c766693c5c9909dec76b24f37e6a1b91999e8d0c0d5feecbe93c3e05"}, + {file = "orjson-3.11.5-cp313-cp313-win32.whl", hash = 
"sha256:2cc79aaad1dfabe1bd2d50ee09814a1253164b3da4c00a78c458d82d04b3bdef"}, + {file = "orjson-3.11.5-cp313-cp313-win_amd64.whl", hash = "sha256:ff7877d376add4e16b274e35a3f58b7f37b362abf4aa31863dadacdd20e3a583"}, + {file = "orjson-3.11.5-cp313-cp313-win_arm64.whl", hash = "sha256:59ac72ea775c88b163ba8d21b0177628bd015c5dd060647bbab6e22da3aad287"}, + {file = "orjson-3.11.5-cp314-cp314-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e446a8ea0a4c366ceafc7d97067bfd55292969143b57e3c846d87fc701e797a0"}, + {file = "orjson-3.11.5-cp314-cp314-macosx_15_0_arm64.whl", hash = "sha256:53deb5addae9c22bbe3739298f5f2196afa881ea75944e7720681c7080909a81"}, + {file = "orjson-3.11.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82cd00d49d6063d2b8791da5d4f9d20539c5951f965e45ccf4e96d33505ce68f"}, + {file = "orjson-3.11.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3fd15f9fc8c203aeceff4fda211157fad114dde66e92e24097b3647a08f4ee9e"}, + {file = "orjson-3.11.5-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9df95000fbe6777bf9820ae82ab7578e8662051bb5f83d71a28992f539d2cda7"}, + {file = "orjson-3.11.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92a8d676748fca47ade5bc3da7430ed7767afe51b2f8100e3cd65e151c0eaceb"}, + {file = "orjson-3.11.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa0f513be38b40234c77975e68805506cad5d57b3dfd8fe3baa7f4f4051e15b4"}, + {file = "orjson-3.11.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa1863e75b92891f553b7922ce4ee10ed06db061e104f2b7815de80cdcb135ad"}, + {file = "orjson-3.11.5-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:d4be86b58e9ea262617b8ca6251a2f0d63cc132a6da4b5fcc8e0a4128782c829"}, + {file = "orjson-3.11.5-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:b923c1c13fa02084eb38c9c065afd860a5cff58026813319a06949c3af5732ac"}, + {file = 
"orjson-3.11.5-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:1b6bd351202b2cd987f35a13b5e16471cf4d952b42a73c391cc537974c43ef6d"}, + {file = "orjson-3.11.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:bb150d529637d541e6af06bbe3d02f5498d628b7f98267ff87647584293ab439"}, + {file = "orjson-3.11.5-cp314-cp314-win32.whl", hash = "sha256:9cc1e55c884921434a84a0c3dd2699eb9f92e7b441d7f53f3941079ec6ce7499"}, + {file = "orjson-3.11.5-cp314-cp314-win_amd64.whl", hash = "sha256:a4f3cb2d874e03bc7767c8f88adaa1a9a05cecea3712649c3b58589ec7317310"}, + {file = "orjson-3.11.5-cp314-cp314-win_arm64.whl", hash = "sha256:38b22f476c351f9a1c43e5b07d8b5a02eb24a6ab8e75f700f7d479d4568346a5"}, + {file = "orjson-3.11.5-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:1b280e2d2d284a6713b0cfec7b08918ebe57df23e3f76b27586197afca3cb1e9"}, + {file = "orjson-3.11.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c8d8a112b274fae8c5f0f01954cb0480137072c271f3f4958127b010dfefaec"}, + {file = "orjson-3.11.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f0a2ae6f09ac7bd47d2d5a5305c1d9ed08ac057cda55bb0a49fa506f0d2da00"}, + {file = "orjson-3.11.5-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c0d87bd1896faac0d10b4f849016db81a63e4ec5df38757ffae84d45ab38aa71"}, + {file = "orjson-3.11.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:801a821e8e6099b8c459ac7540b3c32dba6013437c57fdcaec205b169754f38c"}, + {file = "orjson-3.11.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:69a0f6ac618c98c74b7fbc8c0172ba86f9e01dbf9f62aa0b1776c2231a7bffe5"}, + {file = "orjson-3.11.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fea7339bdd22e6f1060c55ac31b6a755d86a5b2ad3657f2669ec243f8e3b2bdb"}, + {file = "orjson-3.11.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:4dad582bc93cef8f26513e12771e76385a7e6187fd713157e971c784112aad56"}, + {file = "orjson-3.11.5-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:0522003e9f7fba91982e83a97fec0708f5a714c96c4209db7104e6b9d132f111"}, + {file = "orjson-3.11.5-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:7403851e430a478440ecc1258bcbacbfbd8175f9ac1e39031a7121dd0de05ff8"}, + {file = "orjson-3.11.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5f691263425d3177977c8d1dd896cde7b98d93cbf390b2544a090675e83a6a0a"}, + {file = "orjson-3.11.5-cp39-cp39-win32.whl", hash = "sha256:61026196a1c4b968e1b1e540563e277843082e9e97d78afa03eb89315af531f1"}, + {file = "orjson-3.11.5-cp39-cp39-win_amd64.whl", hash = "sha256:09b94b947ac08586af635ef922d69dc9bc63321527a3a04647f4986a73f4bd30"}, + {file = "orjson-3.11.5.tar.gz", hash = "sha256:82393ab47b4fe44ffd0a7659fa9cfaacc717eb617c93cde83795f14af5c2e9d5"}, ] [[package]] @@ -1723,7 +2396,7 @@ version = "25.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" -groups = ["docs"] +groups = ["docs", "vectorbt"] files = [ {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, @@ -1751,7 +2424,7 @@ version = "2.3.3" description = "Powerful data structures for data analysis, time series, and statistics" optional = false python-versions = ">=3.9" -groups = ["main"] +groups = ["main", "vectorbt"] files = [ {file = "pandas-2.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:376c6446ae31770764215a6c937f72d917f214b43560603cd60da6408f183b6c"}, {file = "pandas-2.3.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e19d192383eab2f4ceb30b412b22ea30690c9e618f78870357ae1d682912015a"}, @@ -1841,18 +2514,40 @@ sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-d test = ["hypothesis (>=6.46.1)", "pytest 
(>=7.3.2)", "pytest-xdist (>=2.2.0)"] xml = ["lxml (>=4.9.2)"] +[[package]] +name = "parso" +version = "0.8.5" +description = "A Python Parser" +optional = false +python-versions = ">=3.6" +groups = ["vectorbt"] +files = [ + {file = "parso-0.8.5-py2.py3-none-any.whl", hash = "sha256:646204b5ee239c396d040b90f9e272e9a8017c630092bf59980beb62fd033887"}, + {file = "parso-0.8.5.tar.gz", hash = "sha256:034d7354a9a018bdce352f48b2a8a450f05e9d6ee85db84764e9b6bd96dafe5a"}, +] + +[package.extras] +qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] +testing = ["docopt", "pytest"] + [[package]] name = "pathspec" -version = "0.12.1" +version = "1.0.3" description = "Utility library for gitignore style pattern matching of file paths." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["docs"] files = [ - {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, - {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, + {file = "pathspec-1.0.3-py3-none-any.whl", hash = "sha256:e80767021c1cc524aa3fb14bedda9c34406591343cc42797b386ce7b9354fb6c"}, + {file = "pathspec-1.0.3.tar.gz", hash = "sha256:bac5cf97ae2c2876e2d25ebb15078eb04d76e4b98921ee31c6f85ade8b59444d"}, ] +[package.extras] +hyperscan = ["hyperscan (>=0.7)"] +optional = ["typing-extensions (>=4)"] +re2 = ["google-re2 (>=1.1)"] +tests = ["pytest (>=9)", "typing-extensions (>=4.15)"] + [[package]] name = "peewee" version = "3.17.3" @@ -1864,16 +2559,141 @@ files = [ {file = "peewee-3.17.3.tar.gz", hash = "sha256:ef15f90b628e41a584be8306cdc3243c51f73ce88b06154d9572f6d0284a0169"}, ] +[[package]] +name = "pexpect" +version = "4.9.0" +description = "Pexpect allows easy control of interactive console applications." 
+optional = false +python-versions = "*" +groups = ["vectorbt"] +markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\"" +files = [ + {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"}, + {file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"}, +] + +[package.dependencies] +ptyprocess = ">=0.5" + +[[package]] +name = "pillow" +version = "12.1.0" +description = "Python Imaging Library (fork)" +optional = false +python-versions = ">=3.10" +groups = ["vectorbt"] +files = [ + {file = "pillow-12.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:fb125d860738a09d363a88daa0f59c4533529a90e564785e20fe875b200b6dbd"}, + {file = "pillow-12.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cad302dc10fac357d3467a74a9561c90609768a6f73a1923b0fd851b6486f8b0"}, + {file = "pillow-12.1.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a40905599d8079e09f25027423aed94f2823adaf2868940de991e53a449e14a8"}, + {file = "pillow-12.1.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:92a7fe4225365c5e3a8e598982269c6d6698d3e783b3b1ae979e7819f9cd55c1"}, + {file = "pillow-12.1.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f10c98f49227ed8383d28174ee95155a675c4ed7f85e2e573b04414f7e371bda"}, + {file = "pillow-12.1.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8637e29d13f478bc4f153d8daa9ffb16455f0a6cb287da1b432fdad2bfbd66c7"}, + {file = "pillow-12.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:21e686a21078b0f9cb8c8a961d99e6a4ddb88e0fc5ea6e130172ddddc2e5221a"}, + {file = "pillow-12.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2415373395a831f53933c23ce051021e79c8cd7979822d8cc478547a3f4da8ef"}, + {file = "pillow-12.1.0-cp310-cp310-win32.whl", hash = 
"sha256:e75d3dba8fc1ddfec0cd752108f93b83b4f8d6ab40e524a95d35f016b9683b09"}, + {file = "pillow-12.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:64efdf00c09e31efd754448a383ea241f55a994fd079866b92d2bbff598aad91"}, + {file = "pillow-12.1.0-cp310-cp310-win_arm64.whl", hash = "sha256:f188028b5af6b8fb2e9a76ac0f841a575bd1bd396e46ef0840d9b88a48fdbcea"}, + {file = "pillow-12.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:a83e0850cb8f5ac975291ebfc4170ba481f41a28065277f7f735c202cd8e0af3"}, + {file = "pillow-12.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b6e53e82ec2db0717eabb276aa56cf4e500c9a7cec2c2e189b55c24f65a3e8c0"}, + {file = "pillow-12.1.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:40a8e3b9e8773876d6e30daed22f016509e3987bab61b3b7fe309d7019a87451"}, + {file = "pillow-12.1.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:800429ac32c9b72909c671aaf17ecd13110f823ddb7db4dfef412a5587c2c24e"}, + {file = "pillow-12.1.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0b022eaaf709541b391ee069f0022ee5b36c709df71986e3f7be312e46f42c84"}, + {file = "pillow-12.1.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f345e7bc9d7f368887c712aa5054558bad44d2a301ddf9248599f4161abc7c0"}, + {file = "pillow-12.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d70347c8a5b7ccd803ec0c85c8709f036e6348f1e6a5bf048ecd9c64d3550b8b"}, + {file = "pillow-12.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1fcc52d86ce7a34fd17cb04e87cfdb164648a3662a6f20565910a99653d66c18"}, + {file = "pillow-12.1.0-cp311-cp311-win32.whl", hash = "sha256:3ffaa2f0659e2f740473bcf03c702c39a8d4b2b7ffc629052028764324842c64"}, + {file = "pillow-12.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:806f3987ffe10e867bab0ddad45df1148a2b98221798457fa097ad85d6e8bc75"}, + {file = "pillow-12.1.0-cp311-cp311-win_arm64.whl", hash = 
"sha256:9f5fefaca968e700ad1a4a9de98bf0869a94e397fe3524c4c9450c1445252304"}, + {file = "pillow-12.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a332ac4ccb84b6dde65dbace8431f3af08874bf9770719d32a635c4ef411b18b"}, + {file = "pillow-12.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:907bfa8a9cb790748a9aa4513e37c88c59660da3bcfffbd24a7d9e6abf224551"}, + {file = "pillow-12.1.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:efdc140e7b63b8f739d09a99033aa430accce485ff78e6d311973a67b6bf3208"}, + {file = "pillow-12.1.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bef9768cab184e7ae6e559c032e95ba8d07b3023c289f79a2bd36e8bf85605a5"}, + {file = "pillow-12.1.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:742aea052cf5ab5034a53c3846165bc3ce88d7c38e954120db0ab867ca242661"}, + {file = "pillow-12.1.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a6dfc2af5b082b635af6e08e0d1f9f1c4e04d17d4e2ca0ef96131e85eda6eb17"}, + {file = "pillow-12.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:609e89d9f90b581c8d16358c9087df76024cf058fa693dd3e1e1620823f39670"}, + {file = "pillow-12.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:43b4899cfd091a9693a1278c4982f3e50f7fb7cff5153b05174b4afc9593b616"}, + {file = "pillow-12.1.0-cp312-cp312-win32.whl", hash = "sha256:aa0c9cc0b82b14766a99fbe6084409972266e82f459821cd26997a488a7261a7"}, + {file = "pillow-12.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:d70534cea9e7966169ad29a903b99fc507e932069a881d0965a1a84bb57f6c6d"}, + {file = "pillow-12.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:65b80c1ee7e14a87d6a068dd3b0aea268ffcabfe0498d38661b00c5b4b22e74c"}, + {file = "pillow-12.1.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:7b5dd7cbae20285cdb597b10eb5a2c13aa9de6cde9bb64a3c1317427b1db1ae1"}, + {file = "pillow-12.1.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = 
"sha256:29a4cef9cb672363926f0470afc516dbf7305a14d8c54f7abbb5c199cd8f8179"}, + {file = "pillow-12.1.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:681088909d7e8fa9e31b9799aaa59ba5234c58e5e4f1951b4c4d1082a2e980e0"}, + {file = "pillow-12.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:983976c2ab753166dc66d36af6e8ec15bb511e4a25856e2227e5f7e00a160587"}, + {file = "pillow-12.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:db44d5c160a90df2d24a24760bbd37607d53da0b34fb546c4c232af7192298ac"}, + {file = "pillow-12.1.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6b7a9d1db5dad90e2991645874f708e87d9a3c370c243c2d7684d28f7e133e6b"}, + {file = "pillow-12.1.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6258f3260986990ba2fa8a874f8b6e808cf5abb51a94015ca3dc3c68aa4f30ea"}, + {file = "pillow-12.1.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e115c15e3bc727b1ca3e641a909f77f8ca72a64fff150f666fcc85e57701c26c"}, + {file = "pillow-12.1.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6741e6f3074a35e47c77b23a4e4f2d90db3ed905cb1c5e6e0d49bff2045632bc"}, + {file = "pillow-12.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:935b9d1aed48fcfb3f838caac506f38e29621b44ccc4f8a64d575cb1b2a88644"}, + {file = "pillow-12.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5fee4c04aad8932da9f8f710af2c1a15a83582cfb884152a9caa79d4efcdbf9c"}, + {file = "pillow-12.1.0-cp313-cp313-win32.whl", hash = "sha256:a786bf667724d84aa29b5db1c61b7bfdde380202aaca12c3461afd6b71743171"}, + {file = "pillow-12.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:461f9dfdafa394c59cd6d818bdfdbab4028b83b02caadaff0ffd433faf4c9a7a"}, + {file = "pillow-12.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:9212d6b86917a2300669511ed094a9406888362e085f2431a7da985a6b124f45"}, + {file = "pillow-12.1.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = 
"sha256:00162e9ca6d22b7c3ee8e61faa3c3253cd19b6a37f126cad04f2f88b306f557d"}, + {file = "pillow-12.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7d6daa89a00b58c37cb1747ec9fb7ac3bc5ffd5949f5888657dfddde6d1312e0"}, + {file = "pillow-12.1.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e2479c7f02f9d505682dc47df8c0ea1fc5e264c4d1629a5d63fe3e2334b89554"}, + {file = "pillow-12.1.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f188d580bd870cda1e15183790d1cc2fa78f666e76077d103edf048eed9c356e"}, + {file = "pillow-12.1.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0fde7ec5538ab5095cc02df38ee99b0443ff0e1c847a045554cf5f9af1f4aa82"}, + {file = "pillow-12.1.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0ed07dca4a8464bada6139ab38f5382f83e5f111698caf3191cb8dbf27d908b4"}, + {file = "pillow-12.1.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:f45bd71d1fa5e5749587613037b172e0b3b23159d1c00ef2fc920da6f470e6f0"}, + {file = "pillow-12.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:277518bf4fe74aa91489e1b20577473b19ee70fb97c374aa50830b279f25841b"}, + {file = "pillow-12.1.0-cp313-cp313t-win32.whl", hash = "sha256:7315f9137087c4e0ee73a761b163fc9aa3b19f5f606a7fc08d83fd3e4379af65"}, + {file = "pillow-12.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:0ddedfaa8b5f0b4ffbc2fa87b556dc59f6bb4ecb14a53b33f9189713ae8053c0"}, + {file = "pillow-12.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:80941e6d573197a0c28f394753de529bb436b1ca990ed6e765cf42426abc39f8"}, + {file = "pillow-12.1.0-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:5cb7bc1966d031aec37ddb9dcf15c2da5b2e9f7cc3ca7c54473a20a927e1eb91"}, + {file = "pillow-12.1.0-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:97e9993d5ed946aba26baf9c1e8cf18adbab584b99f452ee72f7ee8acb882796"}, + {file = "pillow-12.1.0-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = 
"sha256:414b9a78e14ffeb98128863314e62c3f24b8a86081066625700b7985b3f529bd"}, + {file = "pillow-12.1.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:e6bdb408f7c9dd2a5ff2b14a3b0bb6d4deb29fb9961e6eb3ae2031ae9a5cec13"}, + {file = "pillow-12.1.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3413c2ae377550f5487991d444428f1a8ae92784aac79caa8b1e3b89b175f77e"}, + {file = "pillow-12.1.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e5dcbe95016e88437ecf33544ba5db21ef1b8dd6e1b434a2cb2a3d605299e643"}, + {file = "pillow-12.1.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d0a7735df32ccbcc98b98a1ac785cc4b19b580be1bdf0aeb5c03223220ea09d5"}, + {file = "pillow-12.1.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0c27407a2d1b96774cbc4a7594129cc027339fd800cd081e44497722ea1179de"}, + {file = "pillow-12.1.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:15c794d74303828eaa957ff8070846d0efe8c630901a1c753fdc63850e19ecd9"}, + {file = "pillow-12.1.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c990547452ee2800d8506c4150280757f88532f3de2a58e3022e9b179107862a"}, + {file = "pillow-12.1.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b63e13dd27da389ed9475b3d28510f0f954bca0041e8e551b2a4eb1eab56a39a"}, + {file = "pillow-12.1.0-cp314-cp314-win32.whl", hash = "sha256:1a949604f73eb07a8adab38c4fe50791f9919344398bdc8ac6b307f755fc7030"}, + {file = "pillow-12.1.0-cp314-cp314-win_amd64.whl", hash = "sha256:4f9f6a650743f0ddee5593ac9e954ba1bdbc5e150bc066586d4f26127853ab94"}, + {file = "pillow-12.1.0-cp314-cp314-win_arm64.whl", hash = "sha256:808b99604f7873c800c4840f55ff389936ef1948e4e87645eaf3fccbc8477ac4"}, + {file = "pillow-12.1.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:bc11908616c8a283cf7d664f77411a5ed2a02009b0097ff8abbba5e79128ccf2"}, + {file = "pillow-12.1.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = 
"sha256:896866d2d436563fa2a43a9d72f417874f16b5545955c54a64941e87c1376c61"}, + {file = "pillow-12.1.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8e178e3e99d3c0ea8fc64b88447f7cac8ccf058af422a6cedc690d0eadd98c51"}, + {file = "pillow-12.1.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:079af2fb0c599c2ec144ba2c02766d1b55498e373b3ac64687e43849fbbef5bc"}, + {file = "pillow-12.1.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bdec5e43377761c5dbca620efb69a77f6855c5a379e32ac5b158f54c84212b14"}, + {file = "pillow-12.1.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:565c986f4b45c020f5421a4cea13ef294dde9509a8577f29b2fc5edc7587fff8"}, + {file = "pillow-12.1.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:43aca0a55ce1eefc0aefa6253661cb54571857b1a7b2964bd8a1e3ef4b729924"}, + {file = "pillow-12.1.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0deedf2ea233722476b3a81e8cdfbad786f7adbed5d848469fa59fe52396e4ef"}, + {file = "pillow-12.1.0-cp314-cp314t-win32.whl", hash = "sha256:b17fbdbe01c196e7e159aacb889e091f28e61020a8abeac07b68079b6e626988"}, + {file = "pillow-12.1.0-cp314-cp314t-win_amd64.whl", hash = "sha256:27b9baecb428899db6c0de572d6d305cfaf38ca1596b5c0542a5182e3e74e8c6"}, + {file = "pillow-12.1.0-cp314-cp314t-win_arm64.whl", hash = "sha256:f61333d817698bdcdd0f9d7793e365ac3d2a21c1f1eb02b32ad6aefb8d8ea831"}, + {file = "pillow-12.1.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ca94b6aac0d7af2a10ba08c0f888b3d5114439b6b3ef39968378723622fed377"}, + {file = "pillow-12.1.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:351889afef0f485b84078ea40fe33727a0492b9af3904661b0abbafee0355b72"}, + {file = "pillow-12.1.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bb0984b30e973f7e2884362b7d23d0a348c7143ee559f38ef3eaab640144204c"}, + {file = 
"pillow-12.1.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:84cabc7095dd535ca934d57e9ce2a72ffd216e435a84acb06b2277b1de2689bd"}, + {file = "pillow-12.1.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:53d8b764726d3af1a138dd353116f774e3862ec7e3794e0c8781e30db0f35dfc"}, + {file = "pillow-12.1.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5da841d81b1a05ef940a8567da92decaa15bc4d7dedb540a8c219ad83d91808a"}, + {file = "pillow-12.1.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:75af0b4c229ac519b155028fa1be632d812a519abba9b46b20e50c6caa184f19"}, + {file = "pillow-12.1.0.tar.gz", hash = "sha256:5c5ae0a06e9ea030ab786b0251b32c7e4ce10e58d983c0d5c56029455180b5b9"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=8.2)", "sphinx-autobuild", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +test-arrow = ["arro3-compute", "arro3-core", "nanoarrow", "pyarrow"] +tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma (>=5)", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "trove-classifiers (>=2024.10.12)"] +xmp = ["defusedxml"] + [[package]] name = "platformdirs" -version = "4.5.0" +version = "4.5.1" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false python-versions = ">=3.10" groups = ["main", "docs"] files = [ - {file = "platformdirs-4.5.0-py3-none-any.whl", hash = "sha256:e578a81bb873cbb89a41fcc904c7ef523cc18284b7e3b3ccf06aca1403b7ebd3"}, - {file = "platformdirs-4.5.0.tar.gz", hash = "sha256:70ddccdd7c99fc5942e9fc25636a8b34d04c24b335100223152c2803e4063312"}, + {file = "platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31"}, + {file = "platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda"}, ] [package.extras] @@ -1881,20 +2701,44 @@ docs = ["furo (>=2025.9.25)", "proselint (>=0.14)", "sphinx (>=8.2.3)", "sphinx- test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.4.2)", "pytest-cov (>=7)", "pytest-mock (>=3.15.1)"] type = ["mypy (>=1.18.2)"] +[[package]] +name = "plotly" +version = "6.5.2" +description = "An open-source interactive data visualization library for Python" +optional = false +python-versions = ">=3.8" +groups = ["vectorbt"] +files = [ + {file = "plotly-6.5.2-py3-none-any.whl", hash = "sha256:91757653bd9c550eeea2fa2404dba6b85d1e366d54804c340b2c874e5a7eb4a4"}, + {file = "plotly-6.5.2.tar.gz", hash = "sha256:7478555be0198562d1435dee4c308268187553cc15516a2f4dd034453699e393"}, +] + +[package.dependencies] +narwhals = ">=1.15.1" +packaging = "*" + +[package.extras] +dev = ["plotly[dev-optional]"] +dev-build = ["build", "jupyter", "plotly[dev-core]"] +dev-core = ["pytest", "requests", "ruff (==0.11.12)"] +dev-optional = ["anywidget", "colorcet", "fiona (<=1.9.6) ; python_version <= \"3.8\"", "geopandas", "inflect", "numpy", "orjson", "pandas", "pdfrw", "pillow", "plotly-geo", "plotly[dev-build]", "plotly[kaleido]", "polars[timezone]", "pyarrow", "pyshp", "pytz", "scikit-image", "scipy", "shapely", "statsmodels", "vaex ; python_version <= \"3.9\"", "xarray"] +express = ["numpy"] +kaleido = ["kaleido (>=1.1.0)"] + [[package]] name = "polars" -version = 
"1.34.0" +version = "1.37.1" description = "Blazingly fast DataFrame library" optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["main"] files = [ - {file = "polars-1.34.0-py3-none-any.whl", hash = "sha256:40d2f357b4d9e447ad28bd2c9923e4318791a7c18eb68f31f1fbf11180f41391"}, - {file = "polars-1.34.0.tar.gz", hash = "sha256:5de5f871027db4b11bcf39215a2d6b13b4a80baf8a55c5862d4ebedfd5cd4013"}, + {file = "polars-1.37.1-py3-none-any.whl", hash = "sha256:377fed8939a2f1223c1563cfabdc7b4a3d6ff846efa1f2ddeb8644fafd9b1aff"}, + {file = "polars-1.37.1.tar.gz", hash = "sha256:0309e2a4633e712513401964b4d95452f124ceabf7aec6db50affb9ced4a274e"}, ] [package.dependencies] -polars-runtime-32 = "1.34.0" +polars-runtime-32 = "1.37.1" [package.extras] adbc = ["adbc-driver-manager[dbapi]", "adbc-driver-sqlite[dbapi]"] @@ -1914,11 +2758,11 @@ numpy = ["numpy (>=1.16.0)"] openpyxl = ["openpyxl (>=3.0.0)"] pandas = ["pandas", "polars[pyarrow]"] plot = ["altair (>=5.4.0)"] -polars-cloud = ["polars_cloud (>=0.0.1a1)"] +polars-cloud = ["polars_cloud (>=0.4.0)"] pyarrow = ["pyarrow (>=7.0.0)"] pydantic = ["pydantic"] -rt64 = ["polars-runtime-64 (==1.34.0)"] -rtcompat = ["polars-runtime-compat (==1.34.0)"] +rt64 = ["polars-runtime-64 (==1.37.1)"] +rtcompat = ["polars-runtime-compat (==1.37.1)"] sqlalchemy = ["polars[pandas]", "sqlalchemy"] style = ["great-tables (>=0.8.0)"] timezone = ["tzdata ; platform_system == \"Windows\""] @@ -1927,41 +2771,86 @@ xlsxwriter = ["xlsxwriter"] [[package]] name = "polars-runtime-32" -version = "1.34.0" +version = "1.37.1" description = "Blazingly fast DataFrame library" optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["main"] files = [ - {file = "polars_runtime_32-1.34.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:2878f9951e91121afe60c25433ef270b9a221e6ebf3de5f6642346b38cab3f03"}, - {file = "polars_runtime_32-1.34.0-cp39-abi3-macosx_11_0_arm64.whl", hash = 
"sha256:fbc329c7d34a924228cc5dcdbbd4696d94411a3a5b15ad8bb868634c204e1951"}, - {file = "polars_runtime_32-1.34.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93fa51d88a2d12ea996a5747aad5647d22a86cce73c80f208e61f487b10bc448"}, - {file = "polars_runtime_32-1.34.0-cp39-abi3-manylinux_2_24_aarch64.whl", hash = "sha256:79e4d696392c6d8d51f4347f0b167c52eef303c9d87093c0c68e8651198735b7"}, - {file = "polars_runtime_32-1.34.0-cp39-abi3-win_amd64.whl", hash = "sha256:2501d6b29d9001ea5ea2fd9b598787e10ddf45d8c4a87c2bead75159e8a15711"}, - {file = "polars_runtime_32-1.34.0-cp39-abi3-win_arm64.whl", hash = "sha256:f9ed1765378dfe0bcd1ac5ec570dd9eab27ea728bbc980cc9a76eebc55586559"}, - {file = "polars_runtime_32-1.34.0.tar.gz", hash = "sha256:ebe6f865128a0d833f53a3f6828360761ad86d1698bceb22bef9fd999500dc1c"}, + {file = "polars_runtime_32-1.37.1-cp310-abi3-macosx_10_12_x86_64.whl", hash = "sha256:0b8d4d73ea9977d3731927740e59d814647c5198bdbe359bcf6a8bfce2e79771"}, + {file = "polars_runtime_32-1.37.1-cp310-abi3-macosx_11_0_arm64.whl", hash = "sha256:c682bf83f5f352e5e02f5c16c652c48ca40442f07b236f30662b22217320ce76"}, + {file = "polars_runtime_32-1.37.1-cp310-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc82b5bbe70ca1a4b764eed1419f6336752d6ba9fc1245388d7f8b12438afa2c"}, + {file = "polars_runtime_32-1.37.1-cp310-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8362d11ac5193b994c7e9048ffe22ccfb976699cfbf6e128ce0302e06728894"}, + {file = "polars_runtime_32-1.37.1-cp310-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:04f5d5a2f013dca7391b7d8e7672fa6d37573a87f1d45d3dd5f0d9b5565a4b0f"}, + {file = "polars_runtime_32-1.37.1-cp310-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:fbfde7c0ca8209eeaed546e4a32cca1319189aa61c5f0f9a2b4494262bd0c689"}, + {file = "polars_runtime_32-1.37.1-cp310-abi3-win_amd64.whl", hash = "sha256:da3d3642ae944e18dd17109d2a3036cb94ce50e5495c5023c77b1599d4c861bc"}, + {file = 
"polars_runtime_32-1.37.1-cp310-abi3-win_arm64.whl", hash = "sha256:55f2c4847a8d2e267612f564de7b753a4bde3902eaabe7b436a0a4abf75949a0"}, + {file = "polars_runtime_32-1.37.1.tar.gz", hash = "sha256:68779d4a691da20a5eb767d74165a8f80a2bdfbde4b54acf59af43f7fa028d8f"}, ] +[[package]] +name = "prompt-toolkit" +version = "3.0.52" +description = "Library for building powerful interactive command lines in Python" +optional = false +python-versions = ">=3.8" +groups = ["vectorbt"] +files = [ + {file = "prompt_toolkit-3.0.52-py3-none-any.whl", hash = "sha256:9aac639a3bbd33284347de5ad8d68ecc044b91a762dc39b7c21095fcd6a19955"}, + {file = "prompt_toolkit-3.0.52.tar.gz", hash = "sha256:28cde192929c8e7321de85de1ddbe736f1375148b02f2e17edd840042b1be855"}, +] + +[package.dependencies] +wcwidth = "*" + [[package]] name = "protobuf" -version = "6.33.0" +version = "6.33.4" description = "" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "protobuf-6.33.0-cp310-abi3-win32.whl", hash = "sha256:d6101ded078042a8f17959eccd9236fb7a9ca20d3b0098bbcb91533a5680d035"}, - {file = "protobuf-6.33.0-cp310-abi3-win_amd64.whl", hash = "sha256:9a031d10f703f03768f2743a1c403af050b6ae1f3480e9c140f39c45f81b13ee"}, - {file = "protobuf-6.33.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:905b07a65f1a4b72412314082c7dbfae91a9e8b68a0cc1577515f8df58ecf455"}, - {file = "protobuf-6.33.0-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:e0697ece353e6239b90ee43a9231318302ad8353c70e6e45499fa52396debf90"}, - {file = "protobuf-6.33.0-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:e0a1715e4f27355afd9570f3ea369735afc853a6c3951a6afe1f80d8569ad298"}, - {file = "protobuf-6.33.0-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:35be49fd3f4fefa4e6e2aacc35e8b837d6703c37a2168a55ac21e9b1bc7559ef"}, - {file = "protobuf-6.33.0-cp39-cp39-win32.whl", hash = "sha256:cd33a8e38ea3e39df66e1bbc462b076d6e5ba3a4ebbde58219d777223a7873d3"}, - {file = "protobuf-6.33.0-cp39-cp39-win_amd64.whl", hash 
= "sha256:c963e86c3655af3a917962c9619e1a6b9670540351d7af9439d06064e3317cc9"}, - {file = "protobuf-6.33.0-py3-none-any.whl", hash = "sha256:25c9e1963c6734448ea2d308cfa610e692b801304ba0908d7bfa564ac5132995"}, - {file = "protobuf-6.33.0.tar.gz", hash = "sha256:140303d5c8d2037730c548f8c7b93b20bb1dc301be280c378b82b8894589c954"}, + {file = "protobuf-6.33.4-cp310-abi3-win32.whl", hash = "sha256:918966612c8232fc6c24c78e1cd89784307f5814ad7506c308ee3cf86662850d"}, + {file = "protobuf-6.33.4-cp310-abi3-win_amd64.whl", hash = "sha256:8f11ffae31ec67fc2554c2ef891dcb561dae9a2a3ed941f9e134c2db06657dbc"}, + {file = "protobuf-6.33.4-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:2fe67f6c014c84f655ee06f6f66213f9254b3a8b6bda6cda0ccd4232c73c06f0"}, + {file = "protobuf-6.33.4-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:757c978f82e74d75cba88eddec479df9b99a42b31193313b75e492c06a51764e"}, + {file = "protobuf-6.33.4-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:c7c64f259c618f0bef7bee042075e390debbf9682334be2b67408ec7c1c09ee6"}, + {file = "protobuf-6.33.4-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:3df850c2f8db9934de4cf8f9152f8dc2558f49f298f37f90c517e8e5c84c30e9"}, + {file = "protobuf-6.33.4-cp39-cp39-win32.whl", hash = "sha256:955478a89559fa4568f5a81dce77260eabc5c686f9e8366219ebd30debf06aa6"}, + {file = "protobuf-6.33.4-cp39-cp39-win_amd64.whl", hash = "sha256:0f12ddbf96912690c3582f9dffb55530ef32015ad8e678cd494312bd78314c4f"}, + {file = "protobuf-6.33.4-py3-none-any.whl", hash = "sha256:1fe3730068fcf2e595816a6c34fe66eeedd37d51d0400b72fabc848811fdc1bc"}, + {file = "protobuf-6.33.4.tar.gz", hash = "sha256:dc2e61bca3b10470c1912d166fe0af67bfc20eb55971dcef8dfa48ce14f0ed91"}, ] +[[package]] +name = "ptyprocess" +version = "0.7.0" +description = "Run a subprocess in a pseudo terminal" +optional = false +python-versions = "*" +groups = ["vectorbt"] +markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\"" +files = [ + {file = 
"ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, + {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, +] + +[[package]] +name = "pure-eval" +version = "0.2.3" +description = "Safely evaluate AST nodes without side effects" +optional = false +python-versions = "*" +groups = ["vectorbt"] +files = [ + {file = "pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0"}, + {file = "pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42"}, +] + +[package.extras] +tests = ["pytest"] + [[package]] name = "pyarrow" version = "19.0.1" @@ -2032,19 +2921,19 @@ files = [ [[package]] name = "pydantic" -version = "2.12.3" +version = "2.12.5" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "pydantic-2.12.3-py3-none-any.whl", hash = "sha256:6986454a854bc3bc6e5443e1369e06a3a456af9d339eda45510f517d9ea5c6bf"}, - {file = "pydantic-2.12.3.tar.gz", hash = "sha256:1da1c82b0fc140bb0103bc1441ffe062154c8d38491189751ee00fd8ca65ce74"}, + {file = "pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d"}, + {file = "pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.41.4" +pydantic-core = "2.41.5" typing-extensions = ">=4.14.1" typing-inspection = ">=0.4.2" @@ -2054,129 +2943,133 @@ timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows [[package]] name = "pydantic-core" -version = "2.41.4" +version = "2.41.5" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.9" groups = ["main"] files 
= [ - {file = "pydantic_core-2.41.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2442d9a4d38f3411f22eb9dd0912b7cbf4b7d5b6c92c4173b75d3e1ccd84e36e"}, - {file = "pydantic_core-2.41.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:30a9876226dda131a741afeab2702e2d127209bde3c65a2b8133f428bc5d006b"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d55bbac04711e2980645af68b97d445cdbcce70e5216de444a6c4b6943ebcccd"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e1d778fb7849a42d0ee5927ab0f7453bf9f85eef8887a546ec87db5ddb178945"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b65077a4693a98b90ec5ad8f203ad65802a1b9b6d4a7e48066925a7e1606706"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:62637c769dee16eddb7686bf421be48dfc2fae93832c25e25bc7242e698361ba"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dfe3aa529c8f501babf6e502936b9e8d4698502b2cfab41e17a028d91b1ac7b"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca2322da745bf2eeb581fc9ea3bbb31147702163ccbcbf12a3bb630e4bf05e1d"}, - {file = "pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e8cd3577c796be7231dcf80badcf2e0835a46665eaafd8ace124d886bab4d700"}, - {file = "pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:1cae8851e174c83633f0833e90636832857297900133705ee158cf79d40f03e6"}, - {file = "pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a26d950449aae348afe1ac8be5525a00ae4235309b729ad4d3399623125b43c9"}, - {file = "pydantic_core-2.41.4-cp310-cp310-win32.whl", hash = "sha256:0cf2a1f599efe57fa0051312774280ee0f650e11152325e41dfd3018ef2c1b57"}, - {file = 
"pydantic_core-2.41.4-cp310-cp310-win_amd64.whl", hash = "sha256:a8c2e340d7e454dc3340d3d2e8f23558ebe78c98aa8f68851b04dcb7bc37abdc"}, - {file = "pydantic_core-2.41.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:28ff11666443a1a8cf2a044d6a545ebffa8382b5f7973f22c36109205e65dc80"}, - {file = "pydantic_core-2.41.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:61760c3925d4633290292bad462e0f737b840508b4f722247d8729684f6539ae"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eae547b7315d055b0de2ec3965643b0ab82ad0106a7ffd29615ee9f266a02827"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ef9ee5471edd58d1fcce1c80ffc8783a650e3e3a193fe90d52e43bb4d87bff1f"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:15dd504af121caaf2c95cb90c0ebf71603c53de98305621b94da0f967e572def"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3a926768ea49a8af4d36abd6a8968b8790f7f76dd7cbd5a4c180db2b4ac9a3a2"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6916b9b7d134bff5440098a4deb80e4cb623e68974a87883299de9124126c2a8"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5cf90535979089df02e6f17ffd076f07237efa55b7343d98760bde8743c4b265"}, - {file = "pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7533c76fa647fade2d7ec75ac5cc079ab3f34879626dae5689b27790a6cf5a5c"}, - {file = "pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:37e516bca9264cbf29612539801ca3cd5d1be465f940417b002905e6ed79d38a"}, - {file = "pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0c19cb355224037c83642429b8ce261ae108e1c5fbf5c028bac63c77b0f8646e"}, - {file = 
"pydantic_core-2.41.4-cp311-cp311-win32.whl", hash = "sha256:09c2a60e55b357284b5f31f5ab275ba9f7f70b7525e18a132ec1f9160b4f1f03"}, - {file = "pydantic_core-2.41.4-cp311-cp311-win_amd64.whl", hash = "sha256:711156b6afb5cb1cb7c14a2cc2c4a8b4c717b69046f13c6b332d8a0a8f41ca3e"}, - {file = "pydantic_core-2.41.4-cp311-cp311-win_arm64.whl", hash = "sha256:6cb9cf7e761f4f8a8589a45e49ed3c0d92d1d696a45a6feaee8c904b26efc2db"}, - {file = "pydantic_core-2.41.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ab06d77e053d660a6faaf04894446df7b0a7e7aba70c2797465a0a1af00fc887"}, - {file = "pydantic_core-2.41.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c53ff33e603a9c1179a9364b0a24694f183717b2e0da2b5ad43c316c956901b2"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:304c54176af2c143bd181d82e77c15c41cbacea8872a2225dd37e6544dce9999"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:025ba34a4cf4fb32f917d5d188ab5e702223d3ba603be4d8aca2f82bede432a4"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9f5f30c402ed58f90c70e12eff65547d3ab74685ffe8283c719e6bead8ef53f"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd96e5d15385d301733113bcaa324c8bcf111275b7675a9c6e88bfb19fc05e3b"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98f348cbb44fae6e9653c1055db7e29de67ea6a9ca03a5fa2c2e11a47cff0e47"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec22626a2d14620a83ca583c6f5a4080fa3155282718b6055c2ea48d3ef35970"}, - {file = "pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3a95d4590b1f1a43bf33ca6d647b990a88f4a3824a8c4572c708f0b45a5290ed"}, - {file = "pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_armv7l.whl", hash 
= "sha256:f9672ab4d398e1b602feadcffcdd3af44d5f5e6ddc15bc7d15d376d47e8e19f8"}, - {file = "pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:84d8854db5f55fead3b579f04bda9a36461dab0730c5d570e1526483e7bb8431"}, - {file = "pydantic_core-2.41.4-cp312-cp312-win32.whl", hash = "sha256:9be1c01adb2ecc4e464392c36d17f97e9110fbbc906bcbe1c943b5b87a74aabd"}, - {file = "pydantic_core-2.41.4-cp312-cp312-win_amd64.whl", hash = "sha256:d682cf1d22bab22a5be08539dca3d1593488a99998f9f412137bc323179067ff"}, - {file = "pydantic_core-2.41.4-cp312-cp312-win_arm64.whl", hash = "sha256:833eebfd75a26d17470b58768c1834dfc90141b7afc6eb0429c21fc5a21dcfb8"}, - {file = "pydantic_core-2.41.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:85e050ad9e5f6fe1004eec65c914332e52f429bc0ae12d6fa2092407a462c746"}, - {file = "pydantic_core-2.41.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7393f1d64792763a48924ba31d1e44c2cfbc05e3b1c2c9abb4ceeadd912cced"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94dab0940b0d1fb28bcab847adf887c66a27a40291eedf0b473be58761c9799a"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:de7c42f897e689ee6f9e93c4bec72b99ae3b32a2ade1c7e4798e690ff5246e02"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:664b3199193262277b8b3cd1e754fb07f2c6023289c815a1e1e8fb415cb247b1"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d95b253b88f7d308b1c0b417c4624f44553ba4762816f94e6986819b9c273fb2"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1351f5bbdbbabc689727cb91649a00cb9ee7203e0a6e54e9f5ba9e22e384b84"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:1affa4798520b148d7182da0615d648e752de4ab1a9566b7471bc803d88a062d"}, - {file = "pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7b74e18052fea4aa8dea2fb7dbc23d15439695da6cbe6cfc1b694af1115df09d"}, - {file = "pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:285b643d75c0e30abda9dc1077395624f314a37e3c09ca402d4015ef5979f1a2"}, - {file = "pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f52679ff4218d713b3b33f88c89ccbf3a5c2c12ba665fb80ccc4192b4608dbab"}, - {file = "pydantic_core-2.41.4-cp313-cp313-win32.whl", hash = "sha256:ecde6dedd6fff127c273c76821bb754d793be1024bc33314a120f83a3c69460c"}, - {file = "pydantic_core-2.41.4-cp313-cp313-win_amd64.whl", hash = "sha256:d081a1f3800f05409ed868ebb2d74ac39dd0c1ff6c035b5162356d76030736d4"}, - {file = "pydantic_core-2.41.4-cp313-cp313-win_arm64.whl", hash = "sha256:f8e49c9c364a7edcbe2a310f12733aad95b022495ef2a8d653f645e5d20c1564"}, - {file = "pydantic_core-2.41.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ed97fd56a561f5eb5706cebe94f1ad7c13b84d98312a05546f2ad036bafe87f4"}, - {file = "pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a870c307bf1ee91fc58a9a61338ff780d01bfae45922624816878dce784095d2"}, - {file = "pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d25e97bc1f5f8f7985bdc2335ef9e73843bb561eb1fa6831fdfc295c1c2061cf"}, - {file = "pydantic_core-2.41.4-cp313-cp313t-win_amd64.whl", hash = "sha256:d405d14bea042f166512add3091c1af40437c2e7f86988f3915fabd27b1e9cd2"}, - {file = "pydantic_core-2.41.4-cp313-cp313t-win_arm64.whl", hash = "sha256:19f3684868309db5263a11bace3c45d93f6f24afa2ffe75a647583df22a2ff89"}, - {file = "pydantic_core-2.41.4-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:e9205d97ed08a82ebb9a307e92914bb30e18cdf6f6b12ca4bedadb1588a0bfe1"}, - {file = "pydantic_core-2.41.4-cp314-cp314-macosx_11_0_arm64.whl", hash = 
"sha256:82df1f432b37d832709fbcc0e24394bba04a01b6ecf1ee87578145c19cde12ac"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3b4cc4539e055cfa39a3763c939f9d409eb40e85813257dcd761985a108554"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b1eb1754fce47c63d2ff57fdb88c351a6c0150995890088b33767a10218eaa4e"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6ab5ab30ef325b443f379ddb575a34969c333004fca5a1daa0133a6ffaad616"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:31a41030b1d9ca497634092b46481b937ff9397a86f9f51bd41c4767b6fc04af"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a44ac1738591472c3d020f61c6df1e4015180d6262ebd39bf2aeb52571b60f12"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d72f2b5e6e82ab8f94ea7d0d42f83c487dc159c5240d8f83beae684472864e2d"}, - {file = "pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:c4d1e854aaf044487d31143f541f7aafe7b482ae72a022c664b2de2e466ed0ad"}, - {file = "pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:b568af94267729d76e6ee5ececda4e283d07bbb28e8148bb17adad93d025d25a"}, - {file = "pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:6d55fb8b1e8929b341cc313a81a26e0d48aa3b519c1dbaadec3a6a2b4fcad025"}, - {file = "pydantic_core-2.41.4-cp314-cp314-win32.whl", hash = "sha256:5b66584e549e2e32a1398df11da2e0a7eff45d5c2d9db9d5667c5e6ac764d77e"}, - {file = "pydantic_core-2.41.4-cp314-cp314-win_amd64.whl", hash = "sha256:557a0aab88664cc552285316809cab897716a372afaf8efdbef756f8b890e894"}, - {file = "pydantic_core-2.41.4-cp314-cp314-win_arm64.whl", hash = 
"sha256:3f1ea6f48a045745d0d9f325989d8abd3f1eaf47dd00485912d1a3a63c623a8d"}, - {file = "pydantic_core-2.41.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6c1fe4c5404c448b13188dd8bd2ebc2bdd7e6727fa61ff481bcc2cca894018da"}, - {file = "pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:523e7da4d43b113bf8e7b49fa4ec0c35bf4fe66b2230bfc5c13cc498f12c6c3e"}, - {file = "pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5729225de81fb65b70fdb1907fcf08c75d498f4a6f15af005aabb1fdadc19dfa"}, - {file = "pydantic_core-2.41.4-cp314-cp314t-win_amd64.whl", hash = "sha256:de2cfbb09e88f0f795fd90cf955858fc2c691df65b1f21f0aa00b99f3fbc661d"}, - {file = "pydantic_core-2.41.4-cp314-cp314t-win_arm64.whl", hash = "sha256:d34f950ae05a83e0ede899c595f312ca976023ea1db100cd5aa188f7005e3ab0"}, - {file = "pydantic_core-2.41.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:646e76293345954acea6966149683047b7b2ace793011922208c8e9da12b0062"}, - {file = "pydantic_core-2.41.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cc8e85a63085a137d286e2791037f5fdfff0aabb8b899483ca9c496dd5797338"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:692c622c8f859a17c156492783902d8370ac7e121a611bd6fe92cc71acf9ee8d"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d1e2906efb1031a532600679b424ef1d95d9f9fb507f813951f23320903adbd7"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e04e2f7f8916ad3ddd417a7abdd295276a0bf216993d9318a5d61cc058209166"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df649916b81822543d1c8e0e1d079235f68acdc7d270c911e8425045a8cfc57e"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:66c529f862fdba70558061bb936fe00ddbaaa0c647fd26e4a4356ef1d6561891"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fc3b4c5a1fd3a311563ed866c2c9b62da06cb6398bee186484ce95c820db71cb"}, - {file = "pydantic_core-2.41.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6e0fc40d84448f941df9b3334c4b78fe42f36e3bf631ad54c3047a0cdddc2514"}, - {file = "pydantic_core-2.41.4-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:44e7625332683b6c1c8b980461475cde9595eff94447500e80716db89b0da005"}, - {file = "pydantic_core-2.41.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:170ee6835f6c71081d031ef1c3b4dc4a12b9efa6a9540f93f95b82f3c7571ae8"}, - {file = "pydantic_core-2.41.4-cp39-cp39-win32.whl", hash = "sha256:3adf61415efa6ce977041ba9745183c0e1f637ca849773afa93833e04b163feb"}, - {file = "pydantic_core-2.41.4-cp39-cp39-win_amd64.whl", hash = "sha256:a238dd3feee263eeaeb7dc44aea4ba1364682c4f9f9467e6af5596ba322c2332"}, - {file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:a1b2cfec3879afb742a7b0bcfa53e4f22ba96571c9e54d6a3afe1052d17d843b"}, - {file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:d175600d975b7c244af6eb9c9041f10059f20b8bbffec9e33fdd5ee3f67cdc42"}, - {file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f184d657fa4947ae5ec9c47bd7e917730fa1cbb78195037e32dcbab50aca5ee"}, - {file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ed810568aeffed3edc78910af32af911c835cc39ebbfacd1f0ab5dd53028e5c"}, - {file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:4f5d640aeebb438517150fdeec097739614421900e4a08db4a3ef38898798537"}, - {file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = 
"sha256:4a9ab037b71927babc6d9e7fc01aea9e66dc2a4a34dff06ef0724a4049629f94"}, - {file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4dab9484ec605c3016df9ad4fd4f9a390bc5d816a3b10c6550f8424bb80b18c"}, - {file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8a5028425820731d8c6c098ab642d7b8b999758e24acae03ed38a66eca8335"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1e5ab4fc177dd41536b3c32b2ea11380dd3d4619a385860621478ac2d25ceb00"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3d88d0054d3fa11ce936184896bed3c1c5441d6fa483b498fac6a5d0dd6f64a9"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b2a054a8725f05b4b6503357e0ac1c4e8234ad3b0c2ac130d6ffc66f0e170e2"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0d9db5a161c99375a0c68c058e227bee1d89303300802601d76a3d01f74e258"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6273ea2c8ffdac7b7fda2653c49682db815aebf4a89243a6feccf5e36c18c347"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:4c973add636efc61de22530b2ef83a65f39b6d6f656df97f678720e20de26caa"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b69d1973354758007f46cf2d44a4f3d0933f10b6dc9bf15cf1356e037f6f731a"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:3619320641fd212aaf5997b6ca505e97540b7e16418f4a241f44cdf108ffb50d"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:491535d45cd7ad7e4a2af4a5169b0d07bebf1adfd164b0368da8aa41e19907a5"}, - {file = 
"pydantic_core-2.41.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:54d86c0cada6aba4ec4c047d0e348cbad7063b87ae0f005d9f8c9ad04d4a92a2"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eca1124aced216b2500dc2609eade086d718e8249cb9696660ab447d50a758bd"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6c9024169becccf0cb470ada03ee578d7348c119a0d42af3dcf9eda96e3a247c"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:26895a4268ae5a2849269f4991cdc97236e4b9c010e51137becf25182daac405"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:ca4df25762cf71308c446e33c9b1fdca2923a3f13de616e2a949f38bf21ff5a8"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:5a28fcedd762349519276c36634e71853b4541079cab4acaaac60c4421827308"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c173ddcd86afd2535e2b695217e82191580663a1d1928239f877f5a1649ef39f"}, - {file = "pydantic_core-2.41.4.tar.gz", hash = "sha256:70e47929a9d4a1905a67e4b687d5946026390568a8e952b92824118063cee4d5"}, + {file = "pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146"}, + {file = "pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9"}, + {file = 
"pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49"}, + {file = "pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba"}, + {file = "pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9"}, + {file = "pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6"}, + {file = "pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a"}, + {file = 
"pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f"}, + {file = "pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7"}, + {file = "pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5"}, + {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c"}, + {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294"}, + {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = 
"sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3"}, + {file = "pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9"}, + {file = "pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = 
"sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd"}, + {file = "pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a"}, + {file = "pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = 
"sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008"}, + {file = "pydantic_core-2.41.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8bfeaf8735be79f225f3fefab7f941c712aaca36f1128c9d7e2352ee1aa87bdf"}, + {file = "pydantic_core-2.41.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:346285d28e4c8017da95144c7f3acd42740d637ff41946af5ce6e5e420502dd5"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a75dafbf87d6276ddc5b2bf6fae5254e3d0876b626eb24969a574fff9149ee5d"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7b93a4d08587e2b7e7882de461e82b6ed76d9026ce91ca7915e740ecc7855f60"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8465ab91a4bd96d36dde3263f06caa6a8a6019e4113f24dc753d79a8b3a3f82"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:299e0a22e7ae2b85c1a57f104538b2656e8ab1873511fd718a1c1c6f149b77b5"}, + {file = 
"pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:707625ef0983fcfb461acfaf14de2067c5942c6bb0f3b4c99158bed6fedd3cf3"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f41eb9797986d6ebac5e8edff36d5cef9de40def462311b3eb3eeded1431e425"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0384e2e1021894b1ff5a786dbf94771e2986ebe2869533874d7e43bc79c6f504"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:f0cd744688278965817fd0839c4a4116add48d23890d468bc436f78beb28abf5"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:753e230374206729bf0a807954bcc6c150d3743928a73faffee51ac6557a03c3"}, + {file = "pydantic_core-2.41.5-cp39-cp39-win32.whl", hash = "sha256:873e0d5b4fb9b89ef7c2d2a963ea7d02879d9da0da8d9d4933dee8ee86a8b460"}, + {file = "pydantic_core-2.41.5-cp39-cp39-win_amd64.whl", hash = "sha256:e4f4a984405e91527a0d62649ee21138f8e3d0ef103be488c1dc11a80d7f184b"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd"}, + {file = 
"pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51"}, + {file = "pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e"}, ] [package.dependencies] @@ -2188,7 +3081,7 @@ version = "2.19.2" description = "Pygments is a syntax highlighting package written in Python." 
optional = false python-versions = ">=3.8" -groups = ["main", "docs"] +groups = ["main", "docs", "vectorbt"] files = [ {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"}, {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"}, @@ -2215,14 +3108,14 @@ test = ["beautifulsoup4", "flake8", "pytest", "pytest-cov"] [[package]] name = "pymdown-extensions" -version = "10.16.1" +version = "10.20" description = "Extension pack for Python Markdown." optional = false python-versions = ">=3.9" groups = ["docs"] files = [ - {file = "pymdown_extensions-10.16.1-py3-none-any.whl", hash = "sha256:d6ba157a6c03146a7fb122b2b9a121300056384eafeec9c9f9e584adfdb2a32d"}, - {file = "pymdown_extensions-10.16.1.tar.gz", hash = "sha256:aace82bcccba3efc03e25d584e6a22d27a8e17caa3f4dd9f207e49b787aa9a91"}, + {file = "pymdown_extensions-10.20-py3-none-any.whl", hash = "sha256:ea9e62add865da80a271d00bfa1c0fa085b20d133fb3fc97afdc88e682f60b2f"}, + {file = "pymdown_extensions-10.20.tar.gz", hash = "sha256:5c73566ab0cf38c6ba084cb7c5ea64a119ae0500cce754ccb682761dfea13a52"}, ] [package.dependencies] @@ -2232,13 +3125,28 @@ pyyaml = "*" [package.extras] extra = ["pygments (>=2.19.1)"] +[[package]] +name = "pyparsing" +version = "3.3.1" +description = "pyparsing - Classes and methods to define and execute parsing grammars" +optional = false +python-versions = ">=3.9" +groups = ["vectorbt"] +files = [ + {file = "pyparsing-3.3.1-py3-none-any.whl", hash = "sha256:023b5e7e5520ad96642e2c6db4cb683d3970bd640cdf7115049a6e9c3682df82"}, + {file = "pyparsing-3.3.1.tar.gz", hash = "sha256:47fad0f17ac1e2cad3de3b458570fbc9b03560aa029ed5e16ee5554da9a2251c"}, +] + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + [[package]] name = "python-dateutil" version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = 
"!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["main", "docs"] +groups = ["main", "docs", "vectorbt"] files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -2249,14 +3157,14 @@ six = ">=1.5" [[package]] name = "python-dotenv" -version = "1.1.1" +version = "1.2.1" description = "Read key-value pairs from a .env file and set them as environment variables" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc"}, - {file = "python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab"}, + {file = "python_dotenv-1.2.1-py3-none-any.whl", hash = "sha256:b81ee9561e9ca4004139c6cbba3a238c32b03e4894671e181b671e8cb8425d61"}, + {file = "python_dotenv-1.2.1.tar.gz", hash = "sha256:42667e897e16ab0d66954af0e60a9caa94f0fd4ecf3aaf6d2d260eec1aa36ad6"}, ] [package.extras] @@ -2268,7 +3176,7 @@ version = "2025.2" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" -groups = ["main"] +groups = ["main", "vectorbt"] files = [ {file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"}, {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"}, @@ -2384,15 +3292,156 @@ files = [ {file = "rcslice-1.1.0.tar.gz", hash = "sha256:a2ce70a60690eb63e52b722e046b334c3aaec5e900b28578f529878782ee5c6e"}, ] +[[package]] +name = "regex" +version = "2026.1.15" +description = "Alternative regular expression module, to replace re." 
+optional = false +python-versions = ">=3.9" +groups = ["vectorbt"] +files = [ + {file = "regex-2026.1.15-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4e3dd93c8f9abe8aa4b6c652016da9a3afa190df5ad822907efe6b206c09896e"}, + {file = "regex-2026.1.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:97499ff7862e868b1977107873dd1a06e151467129159a6ffd07b66706ba3a9f"}, + {file = "regex-2026.1.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0bda75ebcac38d884240914c6c43d8ab5fb82e74cde6da94b43b17c411aa4c2b"}, + {file = "regex-2026.1.15-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7dcc02368585334f5bc81fc73a2a6a0bbade60e7d83da21cead622faf408f32c"}, + {file = "regex-2026.1.15-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:693b465171707bbe882a7a05de5e866f33c76aa449750bee94a8d90463533cc9"}, + {file = "regex-2026.1.15-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b0d190e6f013ea938623a58706d1469a62103fb2a241ce2873a9906e0386582c"}, + {file = "regex-2026.1.15-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5ff818702440a5878a81886f127b80127f5d50563753a28211482867f8318106"}, + {file = "regex-2026.1.15-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f052d1be37ef35a54e394de66136e30fa1191fab64f71fc06ac7bc98c9a84618"}, + {file = "regex-2026.1.15-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:6bfc31a37fd1592f0c4fc4bfc674b5c42e52efe45b4b7a6a14f334cca4bcebe4"}, + {file = "regex-2026.1.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3d6ce5ae80066b319ae3bc62fd55a557c9491baa5efd0d355f0de08c4ba54e79"}, + {file = "regex-2026.1.15-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:1704d204bd42b6bb80167df0e4554f35c255b579ba99616def38f69e14a5ccb9"}, + {file = 
"regex-2026.1.15-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:e3174a5ed4171570dc8318afada56373aa9289eb6dc0d96cceb48e7358b0e220"}, + {file = "regex-2026.1.15-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:87adf5bd6d72e3e17c9cb59ac4096b1faaf84b7eb3037a5ffa61c4b4370f0f13"}, + {file = "regex-2026.1.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e85dc94595f4d766bd7d872a9de5ede1ca8d3063f3bdf1e2c725f5eb411159e3"}, + {file = "regex-2026.1.15-cp310-cp310-win32.whl", hash = "sha256:21ca32c28c30d5d65fc9886ff576fc9b59bbca08933e844fa2363e530f4c8218"}, + {file = "regex-2026.1.15-cp310-cp310-win_amd64.whl", hash = "sha256:3038a62fc7d6e5547b8915a3d927a0fbeef84cdbe0b1deb8c99bbd4a8961b52a"}, + {file = "regex-2026.1.15-cp310-cp310-win_arm64.whl", hash = "sha256:505831646c945e3e63552cc1b1b9b514f0e93232972a2d5bedbcc32f15bc82e3"}, + {file = "regex-2026.1.15-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1ae6020fb311f68d753b7efa9d4b9a5d47a5d6466ea0d5e3b5a471a960ea6e4a"}, + {file = "regex-2026.1.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:eddf73f41225942c1f994914742afa53dc0d01a6e20fe14b878a1b1edc74151f"}, + {file = "regex-2026.1.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e8cd52557603f5c66a548f69421310886b28b7066853089e1a71ee710e1cdc1"}, + {file = "regex-2026.1.15-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5170907244b14303edc5978f522f16c974f32d3aa92109fabc2af52411c9433b"}, + {file = "regex-2026.1.15-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2748c1ec0663580b4510bd89941a31560b4b439a0b428b49472a3d9944d11cd8"}, + {file = "regex-2026.1.15-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2f2775843ca49360508d080eaa87f94fa248e2c946bbcd963bb3aae14f333413"}, + {file = "regex-2026.1.15-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:d9ea2604370efc9a174c1b5dcc81784fb040044232150f7f33756049edfc9026"}, + {file = "regex-2026.1.15-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0dcd31594264029b57bf16f37fd7248a70b3b764ed9e0839a8f271b2d22c0785"}, + {file = "regex-2026.1.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c08c1f3e34338256732bd6938747daa3c0d5b251e04b6e43b5813e94d503076e"}, + {file = "regex-2026.1.15-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e43a55f378df1e7a4fa3547c88d9a5a9b7113f653a66821bcea4718fe6c58763"}, + {file = "regex-2026.1.15-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:f82110ab962a541737bd0ce87978d4c658f06e7591ba899192e2712a517badbb"}, + {file = "regex-2026.1.15-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:27618391db7bdaf87ac6c92b31e8f0dfb83a9de0075855152b720140bda177a2"}, + {file = "regex-2026.1.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bfb0d6be01fbae8d6655c8ca21b3b72458606c4aec9bbc932db758d47aba6db1"}, + {file = "regex-2026.1.15-cp311-cp311-win32.whl", hash = "sha256:b10e42a6de0e32559a92f2f8dc908478cc0fa02838d7dbe764c44dca3fa13569"}, + {file = "regex-2026.1.15-cp311-cp311-win_amd64.whl", hash = "sha256:e9bf3f0bbdb56633c07d7116ae60a576f846efdd86a8848f8d62b749e1209ca7"}, + {file = "regex-2026.1.15-cp311-cp311-win_arm64.whl", hash = "sha256:41aef6f953283291c4e4e6850607bd71502be67779586a61472beacb315c97ec"}, + {file = "regex-2026.1.15-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:4c8fcc5793dde01641a35905d6731ee1548f02b956815f8f1cab89e515a5bdf1"}, + {file = "regex-2026.1.15-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bfd876041a956e6a90ad7cdb3f6a630c07d491280bfeed4544053cd434901681"}, + {file = "regex-2026.1.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9250d087bc92b7d4899ccd5539a1b2334e44eee85d848c4c1aef8e221d3f8c8f"}, + {file = "regex-2026.1.15-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:c8a154cf6537ebbc110e24dabe53095e714245c272da9c1be05734bdad4a61aa"}, + {file = "regex-2026.1.15-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8050ba2e3ea1d8731a549e83c18d2f0999fbc99a5f6bd06b4c91449f55291804"}, + {file = "regex-2026.1.15-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0bf065240704cb8951cc04972cf107063917022511273e0969bdb34fc173456c"}, + {file = "regex-2026.1.15-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c32bef3e7aeee75746748643667668ef941d28b003bfc89994ecf09a10f7a1b5"}, + {file = "regex-2026.1.15-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d5eaa4a4c5b1906bd0d2508d68927f15b81821f85092e06f1a34a4254b0e1af3"}, + {file = "regex-2026.1.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:86c1077a3cc60d453d4084d5b9649065f3bf1184e22992bd322e1f081d3117fb"}, + {file = "regex-2026.1.15-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:2b091aefc05c78d286657cd4db95f2e6313375ff65dcf085e42e4c04d9c8d410"}, + {file = "regex-2026.1.15-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:57e7d17f59f9ebfa9667e6e5a1c0127b96b87cb9cede8335482451ed00788ba4"}, + {file = "regex-2026.1.15-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:c6c4dcdfff2c08509faa15d36ba7e5ef5fcfab25f1e8f85a0c8f45bc3a30725d"}, + {file = "regex-2026.1.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:cf8ff04c642716a7f2048713ddc6278c5fd41faa3b9cab12607c7abecd012c22"}, + {file = "regex-2026.1.15-cp312-cp312-win32.whl", hash = "sha256:82345326b1d8d56afbe41d881fdf62f1926d7264b2fc1537f99ae5da9aad7913"}, + {file = "regex-2026.1.15-cp312-cp312-win_amd64.whl", hash = "sha256:4def140aa6156bc64ee9912383d4038f3fdd18fee03a6f222abd4de6357ce42a"}, + {file = "regex-2026.1.15-cp312-cp312-win_arm64.whl", hash = "sha256:c6c565d9a6e1a8d783c1948937ffc377dd5771e83bd56de8317c450a954d2056"}, + {file = 
"regex-2026.1.15-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e69d0deeb977ffe7ed3d2e4439360089f9c3f217ada608f0f88ebd67afb6385e"}, + {file = "regex-2026.1.15-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3601ffb5375de85a16f407854d11cca8fe3f5febbe3ac78fb2866bb220c74d10"}, + {file = "regex-2026.1.15-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4c5ef43b5c2d4114eb8ea424bb8c9cec01d5d17f242af88b2448f5ee81caadbc"}, + {file = "regex-2026.1.15-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:968c14d4f03e10b2fd960f1d5168c1f0ac969381d3c1fcc973bc45fb06346599"}, + {file = "regex-2026.1.15-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:56a5595d0f892f214609c9f76b41b7428bed439d98dc961efafdd1354d42baae"}, + {file = "regex-2026.1.15-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0bf650f26087363434c4e560011f8e4e738f6f3e029b85d4904c50135b86cfa5"}, + {file = "regex-2026.1.15-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18388a62989c72ac24de75f1449d0fb0b04dfccd0a1a7c1c43af5eb503d890f6"}, + {file = "regex-2026.1.15-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:6d220a2517f5893f55daac983bfa9fe998a7dbcaee4f5d27a88500f8b7873788"}, + {file = "regex-2026.1.15-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c9c08c2fbc6120e70abff5d7f28ffb4d969e14294fb2143b4b5c7d20e46d1714"}, + {file = "regex-2026.1.15-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7ef7d5d4bd49ec7364315167a4134a015f61e8266c6d446fc116a9ac4456e10d"}, + {file = "regex-2026.1.15-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:6e42844ad64194fa08d5ccb75fe6a459b9b08e6d7296bd704460168d58a388f3"}, + {file = "regex-2026.1.15-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:cfecdaa4b19f9ca534746eb3b55a5195d5c95b88cac32a205e981ec0a22b7d31"}, + {file = 
"regex-2026.1.15-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:08df9722d9b87834a3d701f3fca570b2be115654dbfd30179f30ab2f39d606d3"}, + {file = "regex-2026.1.15-cp313-cp313-win32.whl", hash = "sha256:d426616dae0967ca225ab12c22274eb816558f2f99ccb4a1d52ca92e8baf180f"}, + {file = "regex-2026.1.15-cp313-cp313-win_amd64.whl", hash = "sha256:febd38857b09867d3ed3f4f1af7d241c5c50362e25ef43034995b77a50df494e"}, + {file = "regex-2026.1.15-cp313-cp313-win_arm64.whl", hash = "sha256:8e32f7896f83774f91499d239e24cebfadbc07639c1494bb7213983842348337"}, + {file = "regex-2026.1.15-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:ec94c04149b6a7b8120f9f44565722c7ae31b7a6d2275569d2eefa76b83da3be"}, + {file = "regex-2026.1.15-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:40c86d8046915bb9aeb15d3f3f15b6fd500b8ea4485b30e1bbc799dab3fe29f8"}, + {file = "regex-2026.1.15-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:726ea4e727aba21643205edad8f2187ec682d3305d790f73b7a51c7587b64bdd"}, + {file = "regex-2026.1.15-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1cb740d044aff31898804e7bf1181cc72c03d11dfd19932b9911ffc19a79070a"}, + {file = "regex-2026.1.15-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:05d75a668e9ea16f832390d22131fe1e8acc8389a694c8febc3e340b0f810b93"}, + {file = "regex-2026.1.15-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d991483606f3dbec93287b9f35596f41aa2e92b7c2ebbb935b63f409e243c9af"}, + {file = "regex-2026.1.15-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:194312a14819d3e44628a44ed6fea6898fdbecb0550089d84c403475138d0a09"}, + {file = "regex-2026.1.15-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fe2fda4110a3d0bc163c2e0664be44657431440722c5c5315c65155cab92f9e5"}, + {file = 
"regex-2026.1.15-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:124dc36c85d34ef2d9164da41a53c1c8c122cfb1f6e1ec377a1f27ee81deb794"}, + {file = "regex-2026.1.15-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:a1774cd1981cd212506a23a14dba7fdeaee259f5deba2df6229966d9911e767a"}, + {file = "regex-2026.1.15-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:b5f7d8d2867152cdb625e72a530d2ccb48a3d199159144cbdd63870882fb6f80"}, + {file = "regex-2026.1.15-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:492534a0ab925d1db998defc3c302dae3616a2fc3fe2e08db1472348f096ddf2"}, + {file = "regex-2026.1.15-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c661fc820cfb33e166bf2450d3dadbda47c8d8981898adb9b6fe24e5e582ba60"}, + {file = "regex-2026.1.15-cp313-cp313t-win32.whl", hash = "sha256:99ad739c3686085e614bf77a508e26954ff1b8f14da0e3765ff7abbf7799f952"}, + {file = "regex-2026.1.15-cp313-cp313t-win_amd64.whl", hash = "sha256:32655d17905e7ff8ba5c764c43cb124e34a9245e45b83c22e81041e1071aee10"}, + {file = "regex-2026.1.15-cp313-cp313t-win_arm64.whl", hash = "sha256:b2a13dd6a95e95a489ca242319d18fc02e07ceb28fa9ad146385194d95b3c829"}, + {file = "regex-2026.1.15-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:d920392a6b1f353f4aa54328c867fec3320fa50657e25f64abf17af054fc97ac"}, + {file = "regex-2026.1.15-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b5a28980a926fa810dbbed059547b02783952e2efd9c636412345232ddb87ff6"}, + {file = "regex-2026.1.15-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:621f73a07595d83f28952d7bd1e91e9d1ed7625fb7af0064d3516674ec93a2a2"}, + {file = "regex-2026.1.15-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3d7d92495f47567a9b1669c51fc8d6d809821849063d168121ef801bbc213846"}, + {file = "regex-2026.1.15-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8dd16fba2758db7a3780a051f245539c4451ca20910f5a5e6ea1c08d06d4a76b"}, + 
{file = "regex-2026.1.15-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:1e1808471fbe44c1a63e5f577a1d5f02fe5d66031dcbdf12f093ffc1305a858e"}, + {file = "regex-2026.1.15-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0751a26ad39d4f2ade8fe16c59b2bf5cb19eb3d2cd543e709e583d559bd9efde"}, + {file = "regex-2026.1.15-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0f0c7684c7f9ca241344ff95a1de964f257a5251968484270e91c25a755532c5"}, + {file = "regex-2026.1.15-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:74f45d170a21df41508cb67165456538425185baaf686281fa210d7e729abc34"}, + {file = "regex-2026.1.15-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:f1862739a1ffb50615c0fde6bae6569b5efbe08d98e59ce009f68a336f64da75"}, + {file = "regex-2026.1.15-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:453078802f1b9e2b7303fb79222c054cb18e76f7bdc220f7530fdc85d319f99e"}, + {file = "regex-2026.1.15-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:a30a68e89e5a218b8b23a52292924c1f4b245cb0c68d1cce9aec9bbda6e2c160"}, + {file = "regex-2026.1.15-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:9479cae874c81bf610d72b85bb681a94c95722c127b55445285fb0e2c82db8e1"}, + {file = "regex-2026.1.15-cp314-cp314-win32.whl", hash = "sha256:d639a750223132afbfb8f429c60d9d318aeba03281a5f1ab49f877456448dcf1"}, + {file = "regex-2026.1.15-cp314-cp314-win_amd64.whl", hash = "sha256:4161d87f85fa831e31469bfd82c186923070fc970b9de75339b68f0c75b51903"}, + {file = "regex-2026.1.15-cp314-cp314-win_arm64.whl", hash = "sha256:91c5036ebb62663a6b3999bdd2e559fd8456d17e2b485bf509784cd31a8b1705"}, + {file = "regex-2026.1.15-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ee6854c9000a10938c79238de2379bea30c82e4925a371711af45387df35cab8"}, + {file = "regex-2026.1.15-cp314-cp314t-macosx_10_13_x86_64.whl", hash = 
"sha256:2c2b80399a422348ce5de4fe40c418d6299a0fa2803dd61dc0b1a2f28e280fcf"}, + {file = "regex-2026.1.15-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:dca3582bca82596609959ac39e12b7dad98385b4fefccb1151b937383cec547d"}, + {file = "regex-2026.1.15-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef71d476caa6692eea743ae5ea23cde3260677f70122c4d258ca952e5c2d4e84"}, + {file = "regex-2026.1.15-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c243da3436354f4af6c3058a3f81a97d47ea52c9bd874b52fd30274853a1d5df"}, + {file = "regex-2026.1.15-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8355ad842a7c7e9e5e55653eade3b7d1885ba86f124dd8ab1f722f9be6627434"}, + {file = "regex-2026.1.15-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f192a831d9575271a22d804ff1a5355355723f94f31d9eef25f0d45a152fdc1a"}, + {file = "regex-2026.1.15-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:166551807ec20d47ceaeec380081f843e88c8949780cd42c40f18d16168bed10"}, + {file = "regex-2026.1.15-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f9ca1cbdc0fbfe5e6e6f8221ef2309988db5bcede52443aeaee9a4ad555e0dac"}, + {file = "regex-2026.1.15-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:b30bcbd1e1221783c721483953d9e4f3ab9c5d165aa709693d3f3946747b1aea"}, + {file = "regex-2026.1.15-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:2a8d7b50c34578d0d3bf7ad58cde9652b7d683691876f83aedc002862a35dc5e"}, + {file = "regex-2026.1.15-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:9d787e3310c6a6425eb346be4ff2ccf6eece63017916fd77fe8328c57be83521"}, + {file = "regex-2026.1.15-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:619843841e220adca114118533a574a9cd183ed8a28b85627d2844c500a2b0db"}, + {file = "regex-2026.1.15-cp314-cp314t-win32.whl", hash = 
"sha256:e90b8db97f6f2c97eb045b51a6b2c5ed69cedd8392459e0642d4199b94fabd7e"}, + {file = "regex-2026.1.15-cp314-cp314t-win_amd64.whl", hash = "sha256:5ef19071f4ac9f0834793af85bd04a920b4407715624e40cb7a0631a11137cdf"}, + {file = "regex-2026.1.15-cp314-cp314t-win_arm64.whl", hash = "sha256:ca89c5e596fc05b015f27561b3793dc2fa0917ea0d7507eebb448efd35274a70"}, + {file = "regex-2026.1.15-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:55b4ea996a8e4458dd7b584a2f89863b1655dd3d17b88b46cbb9becc495a0ec5"}, + {file = "regex-2026.1.15-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e1e28be779884189cdd57735e997f282b64fd7ccf6e2eef3e16e57d7a34a815"}, + {file = "regex-2026.1.15-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0057de9eaef45783ff69fa94ae9f0fd906d629d0bd4c3217048f46d1daa32e9b"}, + {file = "regex-2026.1.15-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cc7cd0b2be0f0269283a45c0d8b2c35e149d1319dcb4a43c9c3689fa935c1ee6"}, + {file = "regex-2026.1.15-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8db052bbd981e1666f09e957f3790ed74080c2229007c1dd67afdbf0b469c48b"}, + {file = "regex-2026.1.15-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:343db82cb3712c31ddf720f097ef17c11dab2f67f7a3e7be976c4f82eba4e6df"}, + {file = "regex-2026.1.15-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:55e9d0118d97794367309635df398bdfd7c33b93e2fdfa0b239661cd74b4c14e"}, + {file = "regex-2026.1.15-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:008b185f235acd1e53787333e5690082e4f156c44c87d894f880056089e9bc7c"}, + {file = "regex-2026.1.15-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fd65af65e2aaf9474e468f9e571bd7b189e1df3a61caa59dcbabd0000e4ea839"}, + {file = "regex-2026.1.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:f42e68301ff4afee63e365a5fc302b81bb8ba31af625a671d7acb19d10168a8c"}, + {file = "regex-2026.1.15-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:f7792f27d3ee6e0244ea4697d92b825f9a329ab5230a78c1a68bd274e64b5077"}, + {file = "regex-2026.1.15-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:dbaf3c3c37ef190439981648ccbf0c02ed99ae066087dd117fcb616d80b010a4"}, + {file = "regex-2026.1.15-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:adc97a9077c2696501443d8ad3fa1b4fc6d131fc8fd7dfefd1a723f89071cf0a"}, + {file = "regex-2026.1.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:069f56a7bf71d286a6ff932a9e6fb878f151c998ebb2519a9f6d1cee4bffdba3"}, + {file = "regex-2026.1.15-cp39-cp39-win32.whl", hash = "sha256:ea4e6b3566127fda5e007e90a8fd5a4169f0cf0619506ed426db647f19c8454a"}, + {file = "regex-2026.1.15-cp39-cp39-win_amd64.whl", hash = "sha256:cda1ed70d2b264952e88adaa52eea653a33a1b98ac907ae2f86508eb44f65cdc"}, + {file = "regex-2026.1.15-cp39-cp39-win_arm64.whl", hash = "sha256:b325d4714c3c48277bfea1accd94e193ad6ed42b4bad79ad64f3b8f8a31260a5"}, + {file = "regex-2026.1.15.tar.gz", hash = "sha256:164759aa25575cbc0651bef59a0b18353e54300d79ace8084c818ad8ac72b7d5"}, +] + [[package]] name = "rel" -version = "0.4.9.21" +version = "0.4.9.23" description = "Registered Event Listener. Provides standard (pyevent) interface and functionality without external dependencies" optional = false python-versions = "*" groups = ["main"] files = [ - {file = "rel-0.4.9.21-py3-none-any.whl", hash = "sha256:8fe2a0be9a5c1f72cad7a5a908f2241985f81f9f5c4a9b9de09068d7cc77a316"}, + {file = "rel-0.4.9.23-py3-none-any.whl", hash = "sha256:2ea72bb5c334f99d570b159722ee922d924172bd033b5ed4e0fabf2b047e317f"}, ] [[package]] @@ -2401,7 +3450,7 @@ version = "2.32.5" description = "Python HTTP for Humans." 
optional = false python-versions = ">=3.9" -groups = ["main", "docs"] +groups = ["main", "docs", "vectorbt"] files = [ {file = "requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6"}, {file = "requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf"}, @@ -2417,83 +3466,160 @@ urllib3 = ">=1.21.1,<3" socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] +[[package]] +name = "schedule" +version = "1.2.2" +description = "Job scheduling for humans." +optional = false +python-versions = ">=3.7" +groups = ["vectorbt"] +files = [ + {file = "schedule-1.2.2-py3-none-any.whl", hash = "sha256:5bef4a2a0183abf44046ae0d164cadcac21b1db011bdd8102e4a0c1e91e06a7d"}, + {file = "schedule-1.2.2.tar.gz", hash = "sha256:15fe9c75fe5fd9b9627f3f19cc0ef1420508f9f9a46f45cd0769ef75ede5f0b7"}, +] + +[package.extras] +timezone = ["pytz"] + +[[package]] +name = "scikit-learn" +version = "1.8.0" +description = "A set of python modules for machine learning and data mining" +optional = false +python-versions = ">=3.11" +groups = ["vectorbt"] +files = [ + {file = "scikit_learn-1.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:146b4d36f800c013d267b29168813f7a03a43ecd2895d04861f1240b564421da"}, + {file = "scikit_learn-1.8.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:f984ca4b14914e6b4094c5d52a32ea16b49832c03bd17a110f004db3c223e8e1"}, + {file = "scikit_learn-1.8.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5e30adb87f0cc81c7690a84f7932dd66be5bac57cfe16b91cb9151683a4a2d3b"}, + {file = "scikit_learn-1.8.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ada8121bcb4dac28d930febc791a69f7cb1673c8495e5eee274190b73a4559c1"}, + {file = "scikit_learn-1.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:c57b1b610bd1f40ba43970e11ce62821c2e6569e4d74023db19c6b26f246cb3b"}, + {file = 
"scikit_learn-1.8.0-cp311-cp311-win_arm64.whl", hash = "sha256:2838551e011a64e3053ad7618dda9310175f7515f1742fa2d756f7c874c05961"}, + {file = "scikit_learn-1.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5fb63362b5a7ddab88e52b6dbb47dac3fd7dafeee740dc6c8d8a446ddedade8e"}, + {file = "scikit_learn-1.8.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:5025ce924beccb28298246e589c691fe1b8c1c96507e6d27d12c5fadd85bfd76"}, + {file = "scikit_learn-1.8.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4496bb2cf7a43ce1a2d7524a79e40bc5da45cf598dbf9545b7e8316ccba47bb4"}, + {file = "scikit_learn-1.8.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0bcfe4d0d14aec44921545fd2af2338c7471de9cb701f1da4c9d85906ab847a"}, + {file = "scikit_learn-1.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:35c007dedb2ffe38fe3ee7d201ebac4a2deccd2408e8621d53067733e3c74809"}, + {file = "scikit_learn-1.8.0-cp312-cp312-win_arm64.whl", hash = "sha256:8c497fff237d7b4e07e9ef1a640887fa4fb765647f86fbe00f969ff6280ce2bb"}, + {file = "scikit_learn-1.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0d6ae97234d5d7079dc0040990a6f7aeb97cb7fa7e8945f1999a429b23569e0a"}, + {file = "scikit_learn-1.8.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:edec98c5e7c128328124a029bceb09eda2d526997780fef8d65e9a69eead963e"}, + {file = "scikit_learn-1.8.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:74b66d8689d52ed04c271e1329f0c61635bcaf5b926db9b12d58914cdc01fe57"}, + {file = "scikit_learn-1.8.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8fdf95767f989b0cfedb85f7ed8ca215d4be728031f56ff5a519ee1e3276dc2e"}, + {file = "scikit_learn-1.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:2de443b9373b3b615aec1bb57f9baa6bb3a9bd093f1269ba95c17d870422b271"}, + {file = "scikit_learn-1.8.0-cp313-cp313-win_arm64.whl", hash = "sha256:eddde82a035681427cbedded4e6eff5e57fa59216c2e3e90b10b19ab1d0a65c3"}, 
+ {file = "scikit_learn-1.8.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:7cc267b6108f0a1499a734167282c00c4ebf61328566b55ef262d48e9849c735"}, + {file = "scikit_learn-1.8.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:fe1c011a640a9f0791146011dfd3c7d9669785f9fed2b2a5f9e207536cf5c2fd"}, + {file = "scikit_learn-1.8.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:72358cce49465d140cc4e7792015bb1f0296a9742d5622c67e31399b75468b9e"}, + {file = "scikit_learn-1.8.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:80832434a6cc114f5219211eec13dcbc16c2bac0e31ef64c6d346cde3cf054cb"}, + {file = "scikit_learn-1.8.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ee787491dbfe082d9c3013f01f5991658b0f38aa8177e4cd4bf434c58f551702"}, + {file = "scikit_learn-1.8.0-cp313-cp313t-win_arm64.whl", hash = "sha256:bf97c10a3f5a7543f9b88cbf488d33d175e9146115a451ae34568597ba33dcde"}, + {file = "scikit_learn-1.8.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:c22a2da7a198c28dd1a6e1136f19c830beab7fdca5b3e5c8bba8394f8a5c45b3"}, + {file = "scikit_learn-1.8.0-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:6b595b07a03069a2b1740dc08c2299993850ea81cce4fe19b2421e0c970de6b7"}, + {file = "scikit_learn-1.8.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:29ffc74089f3d5e87dfca4c2c8450f88bdc61b0fc6ed5d267f3988f19a1309f6"}, + {file = "scikit_learn-1.8.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fb65db5d7531bccf3a4f6bec3462223bea71384e2cda41da0f10b7c292b9e7c4"}, + {file = "scikit_learn-1.8.0-cp314-cp314-win_amd64.whl", hash = "sha256:56079a99c20d230e873ea40753102102734c5953366972a71d5cb39a32bc40c6"}, + {file = "scikit_learn-1.8.0-cp314-cp314-win_arm64.whl", hash = "sha256:3bad7565bc9cf37ce19a7c0d107742b320c1285df7aab1a6e2d28780df167242"}, + {file = "scikit_learn-1.8.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = 
"sha256:4511be56637e46c25721e83d1a9cea9614e7badc7040c4d573d75fbe257d6fd7"}, + {file = "scikit_learn-1.8.0-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:a69525355a641bf8ef136a7fa447672fb54fe8d60cab5538d9eb7c6438543fb9"}, + {file = "scikit_learn-1.8.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c2656924ec73e5939c76ac4c8b026fc203b83d8900362eb2599d8aee80e4880f"}, + {file = "scikit_learn-1.8.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:15fc3b5d19cc2be65404786857f2e13c70c83dd4782676dd6814e3b89dc8f5b9"}, + {file = "scikit_learn-1.8.0-cp314-cp314t-win_amd64.whl", hash = "sha256:00d6f1d66fbcf4eba6e356e1420d33cc06c70a45bb1363cd6f6a8e4ebbbdece2"}, + {file = "scikit_learn-1.8.0-cp314-cp314t-win_arm64.whl", hash = "sha256:f28dd15c6bb0b66ba09728cf09fd8736c304be29409bd8445a080c1280619e8c"}, + {file = "scikit_learn-1.8.0.tar.gz", hash = "sha256:9bccbb3b40e3de10351f8f5068e105d0f4083b1a65fa07b6634fbc401a6287fd"}, +] + +[package.dependencies] +joblib = ">=1.3.0" +numpy = ">=1.24.1" +scipy = ">=1.10.0" +threadpoolctl = ">=3.2.0" + +[package.extras] +benchmark = ["matplotlib (>=3.6.1)", "memory_profiler (>=0.57.0)", "pandas (>=1.5.0)"] +build = ["cython (>=3.1.2)", "meson-python (>=0.17.1)", "numpy (>=1.24.1)", "scipy (>=1.10.0)"] +docs = ["Pillow (>=10.1.0)", "matplotlib (>=3.6.1)", "memory_profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.5.0)", "plotly (>=5.18.0)", "polars (>=0.20.30)", "pooch (>=1.8.0)", "pydata-sphinx-theme (>=0.15.3)", "scikit-image (>=0.22.0)", "seaborn (>=0.13.0)", "sphinx (>=7.3.7)", "sphinx-copybutton (>=0.5.2)", "sphinx-design (>=0.6.0)", "sphinx-gallery (>=0.17.1)", "sphinx-prompt (>=1.4.0)", "sphinx-remove-toctrees (>=1.0.0.post1)", "sphinxcontrib-sass (>=0.3.4)", "sphinxext-opengraph (>=0.9.1)", "towncrier (>=24.8.0)"] +examples = ["matplotlib (>=3.6.1)", "pandas (>=1.5.0)", "plotly (>=5.18.0)", "pooch (>=1.8.0)", "scikit-image (>=0.22.0)", "seaborn (>=0.13.0)"] +install = 
["joblib (>=1.3.0)", "numpy (>=1.24.1)", "scipy (>=1.10.0)", "threadpoolctl (>=3.2.0)"] +maintenance = ["conda-lock (==3.0.1)"] +tests = ["matplotlib (>=3.6.1)", "mypy (>=1.15)", "numpydoc (>=1.2.0)", "pandas (>=1.5.0)", "polars (>=0.20.30)", "pooch (>=1.8.0)", "pyamg (>=5.0.0)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.11.7)"] + [[package]] name = "scipy" -version = "1.16.2" +version = "1.17.0" description = "Fundamental algorithms for scientific computing in Python" optional = false python-versions = ">=3.11" -groups = ["main"] +groups = ["main", "vectorbt"] files = [ - {file = "scipy-1.16.2-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:6ab88ea43a57da1af33292ebd04b417e8e2eaf9d5aa05700be8d6e1b6501cd92"}, - {file = "scipy-1.16.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:c95e96c7305c96ede73a7389f46ccd6c659c4da5ef1b2789466baeaed3622b6e"}, - {file = "scipy-1.16.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:87eb178db04ece7c698220d523c170125dbffebb7af0345e66c3554f6f60c173"}, - {file = "scipy-1.16.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:4e409eac067dcee96a57fbcf424c13f428037827ec7ee3cb671ff525ca4fc34d"}, - {file = "scipy-1.16.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e574be127bb760f0dad24ff6e217c80213d153058372362ccb9555a10fc5e8d2"}, - {file = "scipy-1.16.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f5db5ba6188d698ba7abab982ad6973265b74bb40a1efe1821b58c87f73892b9"}, - {file = "scipy-1.16.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ec6e74c4e884104ae006d34110677bfe0098203a3fec2f3faf349f4cb05165e3"}, - {file = "scipy-1.16.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:912f46667d2d3834bc3d57361f854226475f695eb08c08a904aadb1c936b6a88"}, - {file = "scipy-1.16.2-cp311-cp311-win_amd64.whl", hash = "sha256:91e9e8a37befa5a69e9cacbe0bcb79ae5afb4a0b130fd6db6ee6cc0d491695fa"}, - {file = 
"scipy-1.16.2-cp311-cp311-win_arm64.whl", hash = "sha256:f3bf75a6dcecab62afde4d1f973f1692be013110cad5338007927db8da73249c"}, - {file = "scipy-1.16.2-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:89d6c100fa5c48472047632e06f0876b3c4931aac1f4291afc81a3644316bb0d"}, - {file = "scipy-1.16.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:ca748936cd579d3f01928b30a17dc474550b01272d8046e3e1ee593f23620371"}, - {file = "scipy-1.16.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:fac4f8ce2ddb40e2e3d0f7ec36d2a1e7f92559a2471e59aec37bd8d9de01fec0"}, - {file = "scipy-1.16.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:033570f1dcefd79547a88e18bccacff025c8c647a330381064f561d43b821232"}, - {file = "scipy-1.16.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ea3421209bf00c8a5ef2227de496601087d8f638a2363ee09af059bd70976dc1"}, - {file = "scipy-1.16.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f66bd07ba6f84cd4a380b41d1bf3c59ea488b590a2ff96744845163309ee8e2f"}, - {file = "scipy-1.16.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5e9feab931bd2aea4a23388c962df6468af3d808ddf2d40f94a81c5dc38f32ef"}, - {file = "scipy-1.16.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:03dfc75e52f72cf23ec2ced468645321407faad8f0fe7b1f5b49264adbc29cb1"}, - {file = "scipy-1.16.2-cp312-cp312-win_amd64.whl", hash = "sha256:0ce54e07bbb394b417457409a64fd015be623f36e330ac49306433ffe04bc97e"}, - {file = "scipy-1.16.2-cp312-cp312-win_arm64.whl", hash = "sha256:2a8ffaa4ac0df81a0b94577b18ee079f13fecdb924df3328fc44a7dc5ac46851"}, - {file = "scipy-1.16.2-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:84f7bf944b43e20b8a894f5fe593976926744f6c185bacfcbdfbb62736b5cc70"}, - {file = "scipy-1.16.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:5c39026d12edc826a1ef2ad35ad1e6d7f087f934bb868fc43fa3049c8b8508f9"}, - {file = "scipy-1.16.2-cp313-cp313-macosx_14_0_arm64.whl", hash = 
"sha256:e52729ffd45b68777c5319560014d6fd251294200625d9d70fd8626516fc49f5"}, - {file = "scipy-1.16.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:024dd4a118cccec09ca3209b7e8e614931a6ffb804b2a601839499cb88bdf925"}, - {file = "scipy-1.16.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7a5dc7ee9c33019973a470556081b0fd3c9f4c44019191039f9769183141a4d9"}, - {file = "scipy-1.16.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c2275ff105e508942f99d4e3bc56b6ef5e4b3c0af970386ca56b777608ce95b7"}, - {file = "scipy-1.16.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:af80196eaa84f033e48444d2e0786ec47d328ba00c71e4299b602235ffef9acb"}, - {file = "scipy-1.16.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9fb1eb735fe3d6ed1f89918224e3385fbf6f9e23757cacc35f9c78d3b712dd6e"}, - {file = "scipy-1.16.2-cp313-cp313-win_amd64.whl", hash = "sha256:fda714cf45ba43c9d3bae8f2585c777f64e3f89a2e073b668b32ede412d8f52c"}, - {file = "scipy-1.16.2-cp313-cp313-win_arm64.whl", hash = "sha256:2f5350da923ccfd0b00e07c3e5cfb316c1c0d6c1d864c07a72d092e9f20db104"}, - {file = "scipy-1.16.2-cp313-cp313t-macosx_10_14_x86_64.whl", hash = "sha256:53d8d2ee29b925344c13bda64ab51785f016b1b9617849dac10897f0701b20c1"}, - {file = "scipy-1.16.2-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:9e05e33657efb4c6a9d23bd8300101536abd99c85cca82da0bffff8d8764d08a"}, - {file = "scipy-1.16.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:7fe65b36036357003b3ef9d37547abeefaa353b237e989c21027b8ed62b12d4f"}, - {file = "scipy-1.16.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:6406d2ac6d40b861cccf57f49592f9779071655e9f75cd4f977fa0bdd09cb2e4"}, - {file = "scipy-1.16.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ff4dc42bd321991fbf611c23fc35912d690f731c9914bf3af8f417e64aca0f21"}, - {file = "scipy-1.16.2-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:654324826654d4d9133e10675325708fb954bc84dae6e9ad0a52e75c6b1a01d7"}, - {file = "scipy-1.16.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:63870a84cd15c44e65220eaed2dac0e8f8b26bbb991456a033c1d9abfe8a94f8"}, - {file = "scipy-1.16.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:fa01f0f6a3050fa6a9771a95d5faccc8e2f5a92b4a2e5440a0fa7264a2398472"}, - {file = "scipy-1.16.2-cp313-cp313t-win_amd64.whl", hash = "sha256:116296e89fba96f76353a8579820c2512f6e55835d3fad7780fece04367de351"}, - {file = "scipy-1.16.2-cp313-cp313t-win_arm64.whl", hash = "sha256:98e22834650be81d42982360382b43b17f7ba95e0e6993e2a4f5b9ad9283a94d"}, - {file = "scipy-1.16.2-cp314-cp314-macosx_10_14_x86_64.whl", hash = "sha256:567e77755019bb7461513c87f02bb73fb65b11f049aaaa8ca17cfaa5a5c45d77"}, - {file = "scipy-1.16.2-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:17d9bb346194e8967296621208fcdfd39b55498ef7d2f376884d5ac47cec1a70"}, - {file = "scipy-1.16.2-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:0a17541827a9b78b777d33b623a6dcfe2ef4a25806204d08ead0768f4e529a88"}, - {file = "scipy-1.16.2-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:d7d4c6ba016ffc0f9568d012f5f1eb77ddd99412aea121e6fa8b4c3b7cbad91f"}, - {file = "scipy-1.16.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9702c4c023227785c779cba2e1d6f7635dbb5b2e0936cdd3a4ecb98d78fd41eb"}, - {file = "scipy-1.16.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d1cdf0ac28948d225decdefcc45ad7dd91716c29ab56ef32f8e0d50657dffcc7"}, - {file = "scipy-1.16.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:70327d6aa572a17c2941cdfb20673f82e536e91850a2e4cb0c5b858b690e1548"}, - {file = "scipy-1.16.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5221c0b2a4b58aa7c4ed0387d360fd90ee9086d383bb34d9f2789fafddc8a936"}, - {file = "scipy-1.16.2-cp314-cp314-win_amd64.whl", hash = "sha256:f5a85d7b2b708025af08f060a496dd261055b617d776fc05a1a1cc69e09fe9ff"}, - {file = 
"scipy-1.16.2-cp314-cp314-win_arm64.whl", hash = "sha256:2cc73a33305b4b24556957d5857d6253ce1e2dcd67fa0ff46d87d1670b3e1e1d"}, - {file = "scipy-1.16.2-cp314-cp314t-macosx_10_14_x86_64.whl", hash = "sha256:9ea2a3fed83065d77367775d689401a703d0f697420719ee10c0780bcab594d8"}, - {file = "scipy-1.16.2-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:7280d926f11ca945c3ef92ba960fa924e1465f8d07ce3a9923080363390624c4"}, - {file = "scipy-1.16.2-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:8afae1756f6a1fe04636407ef7dbece33d826a5d462b74f3d0eb82deabefd831"}, - {file = "scipy-1.16.2-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:5c66511f29aa8d233388e7416a3f20d5cae7a2744d5cee2ecd38c081f4e861b3"}, - {file = "scipy-1.16.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:efe6305aeaa0e96b0ccca5ff647a43737d9a092064a3894e46c414db84bc54ac"}, - {file = "scipy-1.16.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7f3a337d9ae06a1e8d655ee9d8ecb835ea5ddcdcbd8d23012afa055ab014f374"}, - {file = "scipy-1.16.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:bab3605795d269067d8ce78a910220262711b753de8913d3deeaedb5dded3bb6"}, - {file = "scipy-1.16.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b0348d8ddb55be2a844c518cd8cc8deeeb8aeba707cf834db5758fc89b476a2c"}, - {file = "scipy-1.16.2-cp314-cp314t-win_amd64.whl", hash = "sha256:26284797e38b8a75e14ea6631d29bda11e76ceaa6ddb6fdebbfe4c4d90faf2f9"}, - {file = "scipy-1.16.2-cp314-cp314t-win_arm64.whl", hash = "sha256:d2a4472c231328d4de38d5f1f68fdd6d28a615138f842580a8a321b5845cf779"}, - {file = "scipy-1.16.2.tar.gz", hash = "sha256:af029b153d243a80afb6eabe40b0a07f8e35c9adc269c019f364ad747f826a6b"}, + {file = "scipy-1.17.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:2abd71643797bd8a106dff97894ff7869eeeb0af0f7a5ce02e4227c6a2e9d6fd"}, + {file = "scipy-1.17.0-cp311-cp311-macosx_12_0_arm64.whl", hash = 
"sha256:ef28d815f4d2686503e5f4f00edc387ae58dfd7a2f42e348bb53359538f01558"}, + {file = "scipy-1.17.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:272a9f16d6bb4667e8b50d25d71eddcc2158a214df1b566319298de0939d2ab7"}, + {file = "scipy-1.17.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:7204fddcbec2fe6598f1c5fdf027e9f259106d05202a959a9f1aecf036adc9f6"}, + {file = "scipy-1.17.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fc02c37a5639ee67d8fb646ffded6d793c06c5622d36b35cfa8fe5ececb8f042"}, + {file = "scipy-1.17.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dac97a27520d66c12a34fd90a4fe65f43766c18c0d6e1c0a80f114d2260080e4"}, + {file = "scipy-1.17.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ebb7446a39b3ae0fe8f416a9a3fdc6fba3f11c634f680f16a239c5187bc487c0"}, + {file = "scipy-1.17.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:474da16199f6af66601a01546144922ce402cb17362e07d82f5a6cf8f963e449"}, + {file = "scipy-1.17.0-cp311-cp311-win_amd64.whl", hash = "sha256:255c0da161bd7b32a6c898e7891509e8a9289f0b1c6c7d96142ee0d2b114c2ea"}, + {file = "scipy-1.17.0-cp311-cp311-win_arm64.whl", hash = "sha256:85b0ac3ad17fa3be50abd7e69d583d98792d7edc08367e01445a1e2076005379"}, + {file = "scipy-1.17.0-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:0d5018a57c24cb1dd828bcf51d7b10e65986d549f52ef5adb6b4d1ded3e32a57"}, + {file = "scipy-1.17.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:88c22af9e5d5a4f9e027e26772cc7b5922fab8bcc839edb3ae33de404feebd9e"}, + {file = "scipy-1.17.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:f3cd947f20fe17013d401b64e857c6b2da83cae567adbb75b9dcba865abc66d8"}, + {file = "scipy-1.17.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:e8c0b331c2c1f531eb51f1b4fc9ba709521a712cce58f1aa627bc007421a5306"}, + {file = "scipy-1.17.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:5194c445d0a1c7a6c1a4a4681b6b7c71baad98ff66d96b949097e7513c9d6742"}, + {file = "scipy-1.17.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9eeb9b5f5997f75507814ed9d298ab23f62cf79f5a3ef90031b1ee2506abdb5b"}, + {file = "scipy-1.17.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:40052543f7bbe921df4408f46003d6f01c6af109b9e2c8a66dd1cf6cf57f7d5d"}, + {file = "scipy-1.17.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0cf46c8013fec9d3694dc572f0b54100c28405d55d3e2cb15e2895b25057996e"}, + {file = "scipy-1.17.0-cp312-cp312-win_amd64.whl", hash = "sha256:0937a0b0d8d593a198cededd4c439a0ea216a3f36653901ea1f3e4be949056f8"}, + {file = "scipy-1.17.0-cp312-cp312-win_arm64.whl", hash = "sha256:f603d8a5518c7426414d1d8f82e253e454471de682ce5e39c29adb0df1efb86b"}, + {file = "scipy-1.17.0-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:65ec32f3d32dfc48c72df4291345dae4f048749bc8d5203ee0a3f347f96c5ce6"}, + {file = "scipy-1.17.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:1f9586a58039d7229ce77b52f8472c972448cded5736eaf102d5658bbac4c269"}, + {file = "scipy-1.17.0-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:9fad7d3578c877d606b1150135c2639e9de9cecd3705caa37b66862977cc3e72"}, + {file = "scipy-1.17.0-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:423ca1f6584fc03936972b5f7c06961670dbba9f234e71676a7c7ccf938a0d61"}, + {file = "scipy-1.17.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fe508b5690e9eaaa9467fc047f833af58f1152ae51a0d0aed67aa5801f4dd7d6"}, + {file = "scipy-1.17.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6680f2dfd4f6182e7d6db161344537da644d1cf85cf293f015c60a17ecf08752"}, + {file = "scipy-1.17.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:eec3842ec9ac9de5917899b277428886042a93db0b227ebbe3a333b64ec7643d"}, + {file = "scipy-1.17.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:d7425fcafbc09a03731e1bc05581f5fad988e48c6a861f441b7ab729a49a55ea"}, + {file = "scipy-1.17.0-cp313-cp313-win_amd64.whl", hash = "sha256:87b411e42b425b84777718cc41516b8a7e0795abfa8e8e1d573bf0ef014f0812"}, + {file = "scipy-1.17.0-cp313-cp313-win_arm64.whl", hash = "sha256:357ca001c6e37601066092e7c89cca2f1ce74e2a520ca78d063a6d2201101df2"}, + {file = "scipy-1.17.0-cp313-cp313t-macosx_10_14_x86_64.whl", hash = "sha256:ec0827aa4d36cb79ff1b81de898e948a51ac0b9b1c43e4a372c0508c38c0f9a3"}, + {file = "scipy-1.17.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:819fc26862b4b3c73a60d486dbb919202f3d6d98c87cf20c223511429f2d1a97"}, + {file = "scipy-1.17.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:363ad4ae2853d88ebcde3ae6ec46ccca903ea9835ee8ba543f12f575e7b07e4e"}, + {file = "scipy-1.17.0-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:979c3a0ff8e5ba254d45d59ebd38cde48fce4f10b5125c680c7a4bfe177aab07"}, + {file = "scipy-1.17.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:130d12926ae34399d157de777472bf82e9061c60cc081372b3118edacafe1d00"}, + {file = "scipy-1.17.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e886000eb4919eae3a44f035e63f0fd8b651234117e8f6f29bad1cd26e7bc45"}, + {file = "scipy-1.17.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:13c4096ac6bc31d706018f06a49abe0485f96499deb82066b94d19b02f664209"}, + {file = "scipy-1.17.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:cacbaddd91fcffde703934897c5cd2c7cb0371fac195d383f4e1f1c5d3f3bd04"}, + {file = "scipy-1.17.0-cp313-cp313t-win_amd64.whl", hash = "sha256:edce1a1cf66298cccdc48a1bdf8fb10a3bf58e8b58d6c3883dd1530e103f87c0"}, + {file = "scipy-1.17.0-cp313-cp313t-win_arm64.whl", hash = "sha256:30509da9dbec1c2ed8f168b8d8aa853bc6723fede1dbc23c7d43a56f5ab72a67"}, + {file = "scipy-1.17.0-cp314-cp314-macosx_10_14_x86_64.whl", hash = "sha256:c17514d11b78be8f7e6331b983a65a7f5ca1fd037b95e27b280921fe5606286a"}, + {file = 
"scipy-1.17.0-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:4e00562e519c09da34c31685f6acc3aa384d4d50604db0f245c14e1b4488bfa2"}, + {file = "scipy-1.17.0-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:f7df7941d71314e60a481e02d5ebcb3f0185b8d799c70d03d8258f6c80f3d467"}, + {file = "scipy-1.17.0-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:aabf057c632798832f071a8dde013c2e26284043934f53b00489f1773b33527e"}, + {file = "scipy-1.17.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a38c3337e00be6fd8a95b4ed66b5d988bac4ec888fd922c2ea9fe5fb1603dd67"}, + {file = "scipy-1.17.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00fb5f8ec8398ad90215008d8b6009c9db9fa924fd4c7d6be307c6f945f9cd73"}, + {file = "scipy-1.17.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f2a4942b0f5f7c23c7cd641a0ca1955e2ae83dedcff537e3a0259096635e186b"}, + {file = "scipy-1.17.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:dbf133ced83889583156566d2bdf7a07ff89228fe0c0cb727f777de92092ec6b"}, + {file = "scipy-1.17.0-cp314-cp314-win_amd64.whl", hash = "sha256:3625c631a7acd7cfd929e4e31d2582cf00f42fcf06011f59281271746d77e061"}, + {file = "scipy-1.17.0-cp314-cp314-win_arm64.whl", hash = "sha256:9244608d27eafe02b20558523ba57f15c689357c85bdcfe920b1828750aa26eb"}, + {file = "scipy-1.17.0-cp314-cp314t-macosx_10_14_x86_64.whl", hash = "sha256:2b531f57e09c946f56ad0b4a3b2abee778789097871fc541e267d2eca081cff1"}, + {file = "scipy-1.17.0-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:13e861634a2c480bd237deb69333ac79ea1941b94568d4b0efa5db5e263d4fd1"}, + {file = "scipy-1.17.0-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:eb2651271135154aa24f6481cbae5cc8af1f0dd46e6533fb7b56aa9727b6a232"}, + {file = "scipy-1.17.0-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:c5e8647f60679790c2f5c76be17e2e9247dc6b98ad0d3b065861e082c56e078d"}, + {file = "scipy-1.17.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:5fb10d17e649e1446410895639f3385fd2bf4c3c7dfc9bea937bddcbc3d7b9ba"}, + {file = "scipy-1.17.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8547e7c57f932e7354a2319fab613981cde910631979f74c9b542bb167a8b9db"}, + {file = "scipy-1.17.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:33af70d040e8af9d5e7a38b5ed3b772adddd281e3062ff23fec49e49681c38cf"}, + {file = "scipy-1.17.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:f9eb55bb97d00f8b7ab95cb64f873eb0bf54d9446264d9f3609130381233483f"}, + {file = "scipy-1.17.0-cp314-cp314t-win_amd64.whl", hash = "sha256:1ff269abf702f6c7e67a4b7aad981d42871a11b9dd83c58d2d2ea624efbd1088"}, + {file = "scipy-1.17.0-cp314-cp314t-win_arm64.whl", hash = "sha256:031121914e295d9791319a1875444d55079885bbae5bdc9c5e0f2ee5f09d34ff"}, + {file = "scipy-1.17.0.tar.gz", hash = "sha256:2591060c8e648d8b96439e111ac41fd8342fdeff1876be2e19dea3fe8930454e"}, ] [package.dependencies] -numpy = ">=1.25.2,<2.6" +numpy = ">=1.26.4,<2.7" [package.extras] -dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", "rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"] -doc = ["intersphinx_registry", "jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.19.1)", "jupytext", "linkify-it-py", "matplotlib (>=3.5)", "myst-nb (>=1.2.0)", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<8.2.0)", "sphinx-copybutton", "sphinx-design (>=0.4.0)"] +dev = ["click (<8.3.0)", "cython-lint (>=0.12.2)", "mypy (==1.10.0)", "pycodestyle", "ruff (>=0.12.0)", "spin", "types-psutil", "typing_extensions"] +doc = ["intersphinx_registry", "jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.19.1)", "jupytext", "linkify-it-py", "matplotlib (>=3.5)", "myst-nb (>=1.2.0)", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<8.2.0)", "sphinx-copybutton", "sphinx-design (>=0.4.0)", "tabulate"] test = ["Cython", "array-api-strict (>=2.3.1)", 
"asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja ; sys_platform != \"emscripten\"", "pooch", "pytest (>=8.0.0)", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] [[package]] @@ -2523,7 +3649,7 @@ version = "1.17.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["main", "docs"] +groups = ["main", "docs", "vectorbt"] files = [ {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, @@ -2543,81 +3669,76 @@ files = [ [[package]] name = "soupsieve" -version = "2.8" +version = "2.8.2" description = "A modern CSS selector implementation for Beautiful Soup." optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "soupsieve-2.8-py3-none-any.whl", hash = "sha256:0cc76456a30e20f5d7f2e14a98a4ae2ee4e5abdc7c5ea0aafe795f344bc7984c"}, - {file = "soupsieve-2.8.tar.gz", hash = "sha256:e2dd4a40a628cb5f28f6d4b0db8800b8f581b65bb380b97de22ba5ca8d72572f"}, + {file = "soupsieve-2.8.2-py3-none-any.whl", hash = "sha256:0f4c2f6b5a5fb97a641cf69c0bd163670a0e45e6d6c01a2107f93a6a6f93c51a"}, + {file = "soupsieve-2.8.2.tar.gz", hash = "sha256:78a66b0fdee2ab40b7199dc3e747ee6c6e231899feeaae0b9b98a353afd48fd8"}, ] [[package]] name = "sqlalchemy" -version = "2.0.44" +version = "2.0.45" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" groups = ["main"] files = [ - {file = "SQLAlchemy-2.0.44-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:471733aabb2e4848d609141a9e9d56a427c0a038f4abf65dd19d7a21fd563632"}, - {file = "SQLAlchemy-2.0.44-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48bf7d383a35e668b984c805470518b635d48b95a3c57cb03f37eaa3551b5f9f"}, - {file = 
"SQLAlchemy-2.0.44-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bf4bb6b3d6228fcf3a71b50231199fb94d2dd2611b66d33be0578ea3e6c2726"}, - {file = "SQLAlchemy-2.0.44-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:e998cf7c29473bd077704cea3577d23123094311f59bdc4af551923b168332b1"}, - {file = "SQLAlchemy-2.0.44-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:ebac3f0b5732014a126b43c2b7567f2f0e0afea7d9119a3378bde46d3dcad88e"}, - {file = "SQLAlchemy-2.0.44-cp37-cp37m-win32.whl", hash = "sha256:3255d821ee91bdf824795e936642bbf43a4c7cedf5d1aed8d24524e66843aa74"}, - {file = "SQLAlchemy-2.0.44-cp37-cp37m-win_amd64.whl", hash = "sha256:78e6c137ba35476adb5432103ae1534f2f5295605201d946a4198a0dea4b38e7"}, - {file = "sqlalchemy-2.0.44-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7c77f3080674fc529b1bd99489378c7f63fcb4ba7f8322b79732e0258f0ea3ce"}, - {file = "sqlalchemy-2.0.44-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4c26ef74ba842d61635b0152763d057c8d48215d5be9bb8b7604116a059e9985"}, - {file = "sqlalchemy-2.0.44-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4a172b31785e2f00780eccab00bc240ccdbfdb8345f1e6063175b3ff12ad1b0"}, - {file = "sqlalchemy-2.0.44-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9480c0740aabd8cb29c329b422fb65358049840b34aba0adf63162371d2a96e"}, - {file = "sqlalchemy-2.0.44-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:17835885016b9e4d0135720160db3095dc78c583e7b902b6be799fb21035e749"}, - {file = "sqlalchemy-2.0.44-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cbe4f85f50c656d753890f39468fcd8190c5f08282caf19219f684225bfd5fd2"}, - {file = "sqlalchemy-2.0.44-cp310-cp310-win32.whl", hash = "sha256:2fcc4901a86ed81dc76703f3b93ff881e08761c63263c46991081fd7f034b165"}, - {file = "sqlalchemy-2.0.44-cp310-cp310-win_amd64.whl", hash = "sha256:9919e77403a483ab81e3423151e8ffc9dd992c20d2603bf17e4a8161111e55f5"}, - {file = 
"sqlalchemy-2.0.44-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0fe3917059c7ab2ee3f35e77757062b1bea10a0b6ca633c58391e3f3c6c488dd"}, - {file = "sqlalchemy-2.0.44-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:de4387a354ff230bc979b46b2207af841dc8bf29847b6c7dbe60af186d97aefa"}, - {file = "sqlalchemy-2.0.44-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3678a0fb72c8a6a29422b2732fe423db3ce119c34421b5f9955873eb9b62c1e"}, - {file = "sqlalchemy-2.0.44-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cf6872a23601672d61a68f390e44703442639a12ee9dd5a88bbce52a695e46e"}, - {file = "sqlalchemy-2.0.44-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:329aa42d1be9929603f406186630135be1e7a42569540577ba2c69952b7cf399"}, - {file = "sqlalchemy-2.0.44-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:70e03833faca7166e6a9927fbee7c27e6ecde436774cd0b24bbcc96353bce06b"}, - {file = "sqlalchemy-2.0.44-cp311-cp311-win32.whl", hash = "sha256:253e2f29843fb303eca6b2fc645aca91fa7aa0aa70b38b6950da92d44ff267f3"}, - {file = "sqlalchemy-2.0.44-cp311-cp311-win_amd64.whl", hash = "sha256:7a8694107eb4308a13b425ca8c0e67112f8134c846b6e1f722698708741215d5"}, - {file = "sqlalchemy-2.0.44-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:72fea91746b5890f9e5e0997f16cbf3d53550580d76355ba2d998311b17b2250"}, - {file = "sqlalchemy-2.0.44-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:585c0c852a891450edbb1eaca8648408a3cc125f18cf433941fa6babcc359e29"}, - {file = "sqlalchemy-2.0.44-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b94843a102efa9ac68a7a30cd46df3ff1ed9c658100d30a725d10d9c60a2f44"}, - {file = "sqlalchemy-2.0.44-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:119dc41e7a7defcefc57189cfa0e61b1bf9c228211aba432b53fb71ef367fda1"}, - {file = "sqlalchemy-2.0.44-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:0765e318ee9179b3718c4fd7ba35c434f4dd20332fbc6857a5e8df17719c24d7"}, - {file = "sqlalchemy-2.0.44-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2e7b5b079055e02d06a4308d0481658e4f06bc7ef211567edc8f7d5dce52018d"}, - {file = "sqlalchemy-2.0.44-cp312-cp312-win32.whl", hash = "sha256:846541e58b9a81cce7dee8329f352c318de25aa2f2bbe1e31587eb1f057448b4"}, - {file = "sqlalchemy-2.0.44-cp312-cp312-win_amd64.whl", hash = "sha256:7cbcb47fd66ab294703e1644f78971f6f2f1126424d2b300678f419aa73c7b6e"}, - {file = "sqlalchemy-2.0.44-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ff486e183d151e51b1d694c7aa1695747599bb00b9f5f604092b54b74c64a8e1"}, - {file = "sqlalchemy-2.0.44-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0b1af8392eb27b372ddb783b317dea0f650241cea5bd29199b22235299ca2e45"}, - {file = "sqlalchemy-2.0.44-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b61188657e3a2b9ac4e8f04d6cf8e51046e28175f79464c67f2fd35bceb0976"}, - {file = "sqlalchemy-2.0.44-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b87e7b91a5d5973dda5f00cd61ef72ad75a1db73a386b62877d4875a8840959c"}, - {file = "sqlalchemy-2.0.44-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:15f3326f7f0b2bfe406ee562e17f43f36e16167af99c4c0df61db668de20002d"}, - {file = "sqlalchemy-2.0.44-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1e77faf6ff919aa8cd63f1c4e561cac1d9a454a191bb864d5dd5e545935e5a40"}, - {file = "sqlalchemy-2.0.44-cp313-cp313-win32.whl", hash = "sha256:ee51625c2d51f8baadf2829fae817ad0b66b140573939dd69284d2ba3553ae73"}, - {file = "sqlalchemy-2.0.44-cp313-cp313-win_amd64.whl", hash = "sha256:c1c80faaee1a6c3428cecf40d16a2365bcf56c424c92c2b6f0f9ad204b899e9e"}, - {file = "sqlalchemy-2.0.44-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2fc44e5965ea46909a416fff0af48a219faefd5773ab79e5f8a5fcd5d62b2667"}, - {file = "sqlalchemy-2.0.44-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:dc8b3850d2a601ca2320d081874033684e246d28e1c5e89db0864077cfc8f5a9"}, - {file = "sqlalchemy-2.0.44-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d733dec0614bb8f4bcb7c8af88172b974f685a31dc3a65cca0527e3120de5606"}, - {file = "sqlalchemy-2.0.44-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22be14009339b8bc16d6b9dc8780bacaba3402aa7581658e246114abbd2236e3"}, - {file = "sqlalchemy-2.0.44-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:357bade0e46064f88f2c3a99808233e67b0051cdddf82992379559322dfeb183"}, - {file = "sqlalchemy-2.0.44-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4848395d932e93c1595e59a8672aa7400e8922c39bb9b0668ed99ac6fa867822"}, - {file = "sqlalchemy-2.0.44-cp38-cp38-win32.whl", hash = "sha256:2f19644f27c76f07e10603580a47278abb2a70311136a7f8fd27dc2e096b9013"}, - {file = "sqlalchemy-2.0.44-cp38-cp38-win_amd64.whl", hash = "sha256:1df4763760d1de0dfc8192cc96d8aa293eb1a44f8f7a5fbe74caf1b551905c5e"}, - {file = "sqlalchemy-2.0.44-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f7027414f2b88992877573ab780c19ecb54d3a536bef3397933573d6b5068be4"}, - {file = "sqlalchemy-2.0.44-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3fe166c7d00912e8c10d3a9a0ce105569a31a3d0db1a6e82c4e0f4bf16d5eca9"}, - {file = "sqlalchemy-2.0.44-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3caef1ff89b1caefc28f0368b3bde21a7e3e630c2eddac16abd9e47bd27cc36a"}, - {file = "sqlalchemy-2.0.44-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc2856d24afa44295735e72f3c75d6ee7fdd4336d8d3a8f3d44de7aa6b766df2"}, - {file = "sqlalchemy-2.0.44-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:11bac86b0deada30b6b5f93382712ff0e911fe8d31cb9bf46e6b149ae175eff0"}, - {file = "sqlalchemy-2.0.44-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4d18cd0e9a0f37c9f4088e50e3839fcb69a380a0ec957408e0b57cff08ee0a26"}, - {file = "sqlalchemy-2.0.44-cp39-cp39-win32.whl", hash = 
"sha256:9e9018544ab07614d591a26c1bd4293ddf40752cc435caf69196740516af7100"}, - {file = "sqlalchemy-2.0.44-cp39-cp39-win_amd64.whl", hash = "sha256:8e0e4e66fd80f277a8c3de016a81a554e76ccf6b8d881ee0b53200305a8433f6"}, - {file = "sqlalchemy-2.0.44-py3-none-any.whl", hash = "sha256:19de7ca1246fbef9f9d1bff8f1ab25641569df226364a0e40457dc5457c54b05"}, - {file = "sqlalchemy-2.0.44.tar.gz", hash = "sha256:0ae7454e1ab1d780aee69fd2aae7d6b8670a581d8847f2d1e0f7ddfbf47e5a22"}, + {file = "sqlalchemy-2.0.45-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c64772786d9eee72d4d3784c28f0a636af5b0a29f3fe26ff11f55efe90c0bd85"}, + {file = "sqlalchemy-2.0.45-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7ae64ebf7657395824a19bca98ab10eb9a3ecb026bf09524014f1bb81cb598d4"}, + {file = "sqlalchemy-2.0.45-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f02325709d1b1a1489f23a39b318e175a171497374149eae74d612634b234c0"}, + {file = "sqlalchemy-2.0.45-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d2c3684fca8a05f0ac1d9a21c1f4a266983a7ea9180efb80ffeb03861ecd01a0"}, + {file = "sqlalchemy-2.0.45-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:040f6f0545b3b7da6b9317fc3e922c9a98fc7243b2a1b39f78390fc0942f7826"}, + {file = "sqlalchemy-2.0.45-cp310-cp310-win32.whl", hash = "sha256:830d434d609fe7bfa47c425c445a8b37929f140a7a44cdaf77f6d34df3a7296a"}, + {file = "sqlalchemy-2.0.45-cp310-cp310-win_amd64.whl", hash = "sha256:0209d9753671b0da74da2cfbb9ecf9c02f72a759e4b018b3ab35f244c91842c7"}, + {file = "sqlalchemy-2.0.45-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e90a344c644a4fa871eb01809c32096487928bd2038bf10f3e4515cb688cc56"}, + {file = "sqlalchemy-2.0.45-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b8c8b41b97fba5f62349aa285654230296829672fc9939cd7f35aab246d1c08b"}, + {file = 
"sqlalchemy-2.0.45-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:12c694ed6468333a090d2f60950e4250b928f457e4962389553d6ba5fe9951ac"}, + {file = "sqlalchemy-2.0.45-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f7d27a1d977a1cfef38a0e2e1ca86f09c4212666ce34e6ae542f3ed0a33bc606"}, + {file = "sqlalchemy-2.0.45-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d62e47f5d8a50099b17e2bfc1b0c7d7ecd8ba6b46b1507b58cc4f05eefc3bb1c"}, + {file = "sqlalchemy-2.0.45-cp311-cp311-win32.whl", hash = "sha256:3c5f76216e7b85770d5bb5130ddd11ee89f4d52b11783674a662c7dd57018177"}, + {file = "sqlalchemy-2.0.45-cp311-cp311-win_amd64.whl", hash = "sha256:a15b98adb7f277316f2c276c090259129ee4afca783495e212048daf846654b2"}, + {file = "sqlalchemy-2.0.45-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b3ee2aac15169fb0d45822983631466d60b762085bc4535cd39e66bea362df5f"}, + {file = "sqlalchemy-2.0.45-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba547ac0b361ab4f1608afbc8432db669bd0819b3e12e29fb5fa9529a8bba81d"}, + {file = "sqlalchemy-2.0.45-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:215f0528b914e5c75ef2559f69dca86878a3beeb0c1be7279d77f18e8d180ed4"}, + {file = "sqlalchemy-2.0.45-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:107029bf4f43d076d4011f1afb74f7c3e2ea029ec82eb23d8527d5e909e97aa6"}, + {file = "sqlalchemy-2.0.45-cp312-cp312-win32.whl", hash = "sha256:0c9f6ada57b58420a2c0277ff853abe40b9e9449f8d7d231763c6bc30f5c4953"}, + {file = "sqlalchemy-2.0.45-cp312-cp312-win_amd64.whl", hash = "sha256:8defe5737c6d2179c7997242d6473587c3beb52e557f5ef0187277009f73e5e1"}, + {file = "sqlalchemy-2.0.45-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fe187fc31a54d7fd90352f34e8c008cf3ad5d064d08fedd3de2e8df83eb4a1cf"}, + {file = 
"sqlalchemy-2.0.45-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:672c45cae53ba88e0dad74b9027dddd09ef6f441e927786b05bec75d949fbb2e"}, + {file = "sqlalchemy-2.0.45-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:470daea2c1ce73910f08caf10575676a37159a6d16c4da33d0033546bddebc9b"}, + {file = "sqlalchemy-2.0.45-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9c6378449e0940476577047150fd09e242529b761dc887c9808a9a937fe990c8"}, + {file = "sqlalchemy-2.0.45-cp313-cp313-win32.whl", hash = "sha256:4b6bec67ca45bc166c8729910bd2a87f1c0407ee955df110d78948f5b5827e8a"}, + {file = "sqlalchemy-2.0.45-cp313-cp313-win_amd64.whl", hash = "sha256:afbf47dc4de31fa38fd491f3705cac5307d21d4bb828a4f020ee59af412744ee"}, + {file = "sqlalchemy-2.0.45-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:83d7009f40ce619d483d26ac1b757dfe3167b39921379a8bd1b596cf02dab4a6"}, + {file = "sqlalchemy-2.0.45-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d8a2ca754e5415cde2b656c27900b19d50ba076aa05ce66e2207623d3fe41f5a"}, + {file = "sqlalchemy-2.0.45-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f46ec744e7f51275582e6a24326e10c49fbdd3fc99103e01376841213028774"}, + {file = "sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:883c600c345123c033c2f6caca18def08f1f7f4c3ebeb591a63b6fceffc95cce"}, + {file = "sqlalchemy-2.0.45-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2c0b74aa79e2deade948fe8593654c8ef4228c44ba862bb7c9585c8e0db90f33"}, + {file = "sqlalchemy-2.0.45-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:8a420169cef179d4c9064365f42d779f1e5895ad26ca0c8b4c0233920973db74"}, + {file = "sqlalchemy-2.0.45-cp314-cp314-win32.whl", hash = "sha256:e50dcb81a5dfe4b7b4a4aa8f338116d127cb209559124f3694c70d6cd072b68f"}, + {file = 
"sqlalchemy-2.0.45-cp314-cp314-win_amd64.whl", hash = "sha256:4748601c8ea959e37e03d13dcda4a44837afcd1b21338e637f7c935b8da06177"}, + {file = "sqlalchemy-2.0.45-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cd337d3526ec5298f67d6a30bbbe4ed7e5e68862f0bf6dd21d289f8d37b7d60b"}, + {file = "sqlalchemy-2.0.45-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9a62b446b7d86a3909abbcd1cd3cc550a832f99c2bc37c5b22e1925438b9367b"}, + {file = "sqlalchemy-2.0.45-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5964f832431b7cdfaaa22a660b4c7eb1dfcd6ed41375f67fd3e3440fd95cb3cc"}, + {file = "sqlalchemy-2.0.45-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee580ab50e748208754ae8980cec79ec205983d8cf8b3f7c39067f3d9f2c8e22"}, + {file = "sqlalchemy-2.0.45-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13e27397a7810163440c6bfed6b3fe46f1bfb2486eb540315a819abd2c004128"}, + {file = "sqlalchemy-2.0.45-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ed3635353e55d28e7f4a95c8eda98a5cdc0a0b40b528433fbd41a9ae88f55b3d"}, + {file = "sqlalchemy-2.0.45-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:db6834900338fb13a9123307f0c2cbb1f890a8656fcd5e5448ae3ad5bbe8d312"}, + {file = "sqlalchemy-2.0.45-cp38-cp38-win32.whl", hash = "sha256:1d8b4a7a8c9b537509d56d5cd10ecdcfbb95912d72480c8861524efecc6a3fff"}, + {file = "sqlalchemy-2.0.45-cp38-cp38-win_amd64.whl", hash = "sha256:ebd300afd2b62679203435f596b2601adafe546cb7282d5a0cd3ed99e423720f"}, + {file = "sqlalchemy-2.0.45-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d29b2b99d527dbc66dd87c3c3248a5dd789d974a507f4653c969999fc7c1191b"}, + {file = "sqlalchemy-2.0.45-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:59a8b8bd9c6bedf81ad07c8bd5543eedca55fe9b8780b2b628d495ba55f8db1e"}, + {file = 
"sqlalchemy-2.0.45-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fd93c6f5d65f254ceabe97548c709e073d6da9883343adaa51bf1a913ce93f8e"}, + {file = "sqlalchemy-2.0.45-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6d0beadc2535157070c9c17ecf25ecec31e13c229a8f69196d7590bde8082bf1"}, + {file = "sqlalchemy-2.0.45-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e057f928ffe9c9b246a55b469c133b98a426297e1772ad24ce9f0c47d123bd5b"}, + {file = "sqlalchemy-2.0.45-cp39-cp39-win32.whl", hash = "sha256:c1c2091b1489435ff85728fafeb990f073e64f6f5e81d5cd53059773e8521eb6"}, + {file = "sqlalchemy-2.0.45-cp39-cp39-win_amd64.whl", hash = "sha256:56ead1f8dfb91a54a28cd1d072c74b3d635bcffbd25e50786533b822d4f2cde2"}, + {file = "sqlalchemy-2.0.45-py3-none-any.whl", hash = "sha256:5225a288e4c8cc2308dbdd874edad6e7d0fd38eac1e9e5f23503425c8eee20d0"}, + {file = "sqlalchemy-2.0.45.tar.gz", hash = "sha256:1632a4bda8d2d25703fdad6363058d882541bdaaee0e5e3ddfa0cd3229efce88"}, ] [package.dependencies] @@ -2649,16 +3770,36 @@ postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] pymysql = ["pymysql"] sqlcipher = ["sqlcipher3_binary"] +[[package]] +name = "stack-data" +version = "0.6.3" +description = "Extract data from python stack frames and tracebacks for informative displays" +optional = false +python-versions = "*" +groups = ["vectorbt"] +files = [ + {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"}, + {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"}, +] + +[package.dependencies] +asttokens = ">=2.1.0" +executing = ">=1.2.0" +pure-eval = "*" + +[package.extras] +tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] + [[package]] name = "structlog" -version = "25.4.0" +version = "25.5.0" description = "Structured Logging for Python" optional = false python-versions = ">=3.8" groups = 
["main"] files = [ - {file = "structlog-25.4.0-py3-none-any.whl", hash = "sha256:fe809ff5c27e557d14e613f45ca441aabda051d119ee5a0102aaba6ce40eed2c"}, - {file = "structlog-25.4.0.tar.gz", hash = "sha256:186cd1b0a8ae762e29417095664adf1d6a31702160a46dacb7796ea82f7409e4"}, + {file = "structlog-25.5.0-py3-none-any.whl", hash = "sha256:a8453e9b9e636ec59bd9e79bbd4a72f025981b3ba0f5837aebf48f02f37a7f9f"}, + {file = "structlog-25.5.0.tar.gz", hash = "sha256:098522a3bebed9153d4570c6d0288abf80a031dfdb2048d59a49e9dc2190fc98"}, ] [[package]] @@ -2676,6 +3817,18 @@ files = [ [package.extras] widechars = ["wcwidth"] +[[package]] +name = "threadpoolctl" +version = "3.6.0" +description = "threadpoolctl" +optional = false +python-versions = ">=3.9" +groups = ["vectorbt"] +files = [ + {file = "threadpoolctl-3.6.0-py3-none-any.whl", hash = "sha256:43a0b8fd5a2928500110039e43a5eed8480b918967083ea48dc3ab9f13c4a7fb"}, + {file = "threadpoolctl-3.6.0.tar.gz", hash = "sha256:8ab8b4aa3491d812b623328249fab5302a68d2d71745c8a4c719a2fcaba9f44e"}, +] + [[package]] name = "toolz" version = "1.1.0" @@ -2688,6 +3841,44 @@ files = [ {file = "toolz-1.1.0.tar.gz", hash = "sha256:27a5c770d068c110d9ed9323f24f1543e83b2f300a687b7891c1a6d56b697b5b"}, ] +[[package]] +name = "tqdm" +version = "4.67.1" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +groups = ["vectorbt"] +files = [ + {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, + {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["nbval", "pytest (>=6)", "pytest-asyncio (>=0.24)", "pytest-cov", "pytest-timeout"] +discord = ["requests"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "traitlets" 
+version = "5.14.3" +description = "Traitlets Python configuration system" +optional = false +python-versions = ">=3.8" +groups = ["vectorbt"] +files = [ + {file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"}, + {file = "traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"}, +] + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"] + [[package]] name = "typing-extensions" version = "4.15.0" @@ -2717,33 +3908,86 @@ typing-extensions = ">=4.12.0" [[package]] name = "tzdata" -version = "2025.2" +version = "2025.3" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" -groups = ["main"] +groups = ["main", "vectorbt"] files = [ - {file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"}, - {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"}, + {file = "tzdata-2025.3-py2.py3-none-any.whl", hash = "sha256:06a47e5700f3081aab02b2e513160914ff0694bce9947d6b76ebd6bf57cfc5d1"}, + {file = "tzdata-2025.3.tar.gz", hash = "sha256:de39c2ca5dc7b0344f2eba86f49d614019d29f060fc4ebc8a417896a620b56a7"}, ] +[[package]] +name = "tzlocal" +version = "5.3.1" +description = "tzinfo object for the local timezone" +optional = false +python-versions = ">=3.9" +groups = ["vectorbt"] +files = [ + {file = "tzlocal-5.3.1-py3-none-any.whl", hash = "sha256:eb1a66c3ef5847adf7a834f1be0800581b683b5608e74f86ecbcef8ab91bb85d"}, + {file = "tzlocal-5.3.1.tar.gz", hash = "sha256:cceffc7edecefea1f595541dbd6e990cb1ea3d19bf01b2809f362a03dd7921fd"}, +] + +[package.dependencies] +tzdata = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +devenv = 
["check-manifest", "pytest (>=4.3)", "pytest-cov", "pytest-mock (>=3.3)", "zest.releaser"] + [[package]] name = "urllib3" -version = "2.5.0" +version = "2.6.3" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.9" -groups = ["main", "docs"] +groups = ["main", "docs", "vectorbt"] files = [ - {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, - {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, + {file = "urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4"}, + {file = "urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed"}, ] [package.extras] -brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] +brotli = ["brotli (>=1.2.0) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=1.2.0.0) ; platform_python_implementation != \"CPython\""] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["zstandard (>=0.18.0)"] +zstd = ["backports-zstd (>=1.0.0) ; python_version < \"3.14\""] + +[[package]] +name = "vectorbt" +version = "0.28.2" +description = "Python library for backtesting and analyzing trading strategies at scale" +optional = false +python-versions = ">=3.6" +groups = ["vectorbt"] +files = [ + {file = "vectorbt-0.28.2-py3-none-any.whl", hash = "sha256:93e5fb20d2ff072b7fed78603b516eb64f967c9bf9420ce8ba28329af0410e7d"}, + {file = "vectorbt-0.28.2.tar.gz", hash = "sha256:e1a5b7a11c0e2b5b271f18093cb7d1ea075d94d711388c0f423355e83c63c104"}, +] + +[package.dependencies] +dateparser = "*" +dill = "*" +imageio = "*" +ipywidgets = ">=7.0.0" +matplotlib = "*" +mypy_extensions = "*" +numba = {version = ">=0.57.0", markers = 
"python_version >= \"3.11\""} +numpy = ">=1.16.5" +pandas = "*" +plotly = ">=4.12.0" +pytz = "*" +requests = "*" +schedule = "*" +scikit-learn = "*" +scipy = "*" +tqdm = "*" + +[package.extras] +cov = ["codecov", "pytest", "pytest-cov"] +full = ["TA-Lib", "alpaca-py", "ccxt (>=4.0.14)", "pandas-ta-classic", "python-binance", "python-telegram-bot (>=13.4,<20.0)", "quantstats (>=0.0.37)", "ray (>=1.4.1)", "ta", "yfinance (>=0.2.22)"] +full-no-talib = ["alpaca-py", "ccxt (>=4.0.14)", "pandas-ta-classic", "python-binance", "python-telegram-bot (>=13.4,<20.0)", "quantstats (>=0.0.37)", "ray (>=1.4.1)", "ta", "yfinance (>=0.2.22)"] [[package]] name = "watchdog" @@ -2788,6 +4032,18 @@ files = [ [package.extras] watchmedo = ["PyYAML (>=3.10)"] +[[package]] +name = "wcwidth" +version = "0.2.14" +description = "Measures the displayed width of unicode strings in a terminal" +optional = false +python-versions = ">=3.6" +groups = ["vectorbt"] +files = [ + {file = "wcwidth-0.2.14-py2.py3-none-any.whl", hash = "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1"}, + {file = "wcwidth-0.2.14.tar.gz", hash = "sha256:4d478375d31bc5395a3c55c40ccdf3354688364cd61c4f6adacaa9215d0b3605"}, +] + [[package]] name = "websocket-client" version = "1.9.0" @@ -2807,81 +4063,85 @@ test = ["pytest", "websockets"] [[package]] name = "websockets" -version = "15.0.1" +version = "16.0" description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["main"] files = [ - {file = "websockets-15.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d63efaa0cd96cf0c5fe4d581521d9fa87744540d4bc999ae6e08595a1014b45b"}, - {file = "websockets-15.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac60e3b188ec7574cb761b08d50fcedf9d77f1530352db4eef1707fe9dee7205"}, - {file = "websockets-15.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:5756779642579d902eed757b21b0164cd6fe338506a8083eb58af5c372e39d9a"}, - {file = "websockets-15.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdfe3e2a29e4db3659dbd5bbf04560cea53dd9610273917799f1cde46aa725e"}, - {file = "websockets-15.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c2529b320eb9e35af0fa3016c187dffb84a3ecc572bcee7c3ce302bfeba52bf"}, - {file = "websockets-15.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac1e5c9054fe23226fb11e05a6e630837f074174c4c2f0fe442996112a6de4fb"}, - {file = "websockets-15.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5df592cd503496351d6dc14f7cdad49f268d8e618f80dce0cd5a36b93c3fc08d"}, - {file = "websockets-15.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0a34631031a8f05657e8e90903e656959234f3a04552259458aac0b0f9ae6fd9"}, - {file = "websockets-15.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3d00075aa65772e7ce9e990cab3ff1de702aa09be3940d1dc88d5abf1ab8a09c"}, - {file = "websockets-15.0.1-cp310-cp310-win32.whl", hash = "sha256:1234d4ef35db82f5446dca8e35a7da7964d02c127b095e172e54397fb6a6c256"}, - {file = "websockets-15.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:39c1fec2c11dc8d89bba6b2bf1556af381611a173ac2b511cf7231622058af41"}, - {file = "websockets-15.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:823c248b690b2fd9303ba00c4f66cd5e2d8c3ba4aa968b2779be9532a4dad431"}, - {file = "websockets-15.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678999709e68425ae2593acf2e3ebcbcf2e69885a5ee78f9eb80e6e371f1bf57"}, - {file = "websockets-15.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d50fd1ee42388dcfb2b3676132c78116490976f1300da28eb629272d5d93e905"}, - {file = "websockets-15.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:d99e5546bf73dbad5bf3547174cd6cb8ba7273062a23808ffea025ecb1cf8562"}, - {file = "websockets-15.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66dd88c918e3287efc22409d426c8f729688d89a0c587c88971a0faa2c2f3792"}, - {file = "websockets-15.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8dd8327c795b3e3f219760fa603dcae1dcc148172290a8ab15158cf85a953413"}, - {file = "websockets-15.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fdc51055e6ff4adeb88d58a11042ec9a5eae317a0a53d12c062c8a8865909e8"}, - {file = "websockets-15.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:693f0192126df6c2327cce3baa7c06f2a117575e32ab2308f7f8216c29d9e2e3"}, - {file = "websockets-15.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:54479983bd5fb469c38f2f5c7e3a24f9a4e70594cd68cd1fa6b9340dadaff7cf"}, - {file = "websockets-15.0.1-cp311-cp311-win32.whl", hash = "sha256:16b6c1b3e57799b9d38427dda63edcbe4926352c47cf88588c0be4ace18dac85"}, - {file = "websockets-15.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:27ccee0071a0e75d22cb35849b1db43f2ecd3e161041ac1ee9d2352ddf72f065"}, - {file = "websockets-15.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3"}, - {file = "websockets-15.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665"}, - {file = "websockets-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2"}, - {file = "websockets-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215"}, - {file = "websockets-15.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5"}, - {file = "websockets-15.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65"}, - {file = "websockets-15.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe"}, - {file = "websockets-15.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4"}, - {file = "websockets-15.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597"}, - {file = "websockets-15.0.1-cp312-cp312-win32.whl", hash = "sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9"}, - {file = "websockets-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7"}, - {file = "websockets-15.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931"}, - {file = "websockets-15.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675"}, - {file = "websockets-15.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151"}, - {file = "websockets-15.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22"}, - {file = "websockets-15.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f"}, - {file = "websockets-15.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8"}, - {file = "websockets-15.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375"}, - {file = "websockets-15.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d"}, - {file = "websockets-15.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4"}, - {file = "websockets-15.0.1-cp313-cp313-win32.whl", hash = "sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa"}, - {file = "websockets-15.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561"}, - {file = "websockets-15.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5f4c04ead5aed67c8a1a20491d54cdfba5884507a48dd798ecaf13c74c4489f5"}, - {file = "websockets-15.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abdc0c6c8c648b4805c5eacd131910d2a7f6455dfd3becab248ef108e89ab16a"}, - {file = "websockets-15.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a625e06551975f4b7ea7102bc43895b90742746797e2e14b70ed61c43a90f09b"}, - {file = "websockets-15.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d591f8de75824cbb7acad4e05d2d710484f15f29d4a915092675ad3456f11770"}, - {file = "websockets-15.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47819cea040f31d670cc8d324bb6435c6f133b8c7a19ec3d61634e62f8d8f9eb"}, - {file = "websockets-15.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac017dd64572e5c3bd01939121e4d16cf30e5d7e110a119399cf3133b63ad054"}, - {file = "websockets-15.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4a9fac8e469d04ce6c25bb2610dc535235bd4aa14996b4e6dbebf5e007eba5ee"}, - {file = 
"websockets-15.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363c6f671b761efcb30608d24925a382497c12c506b51661883c3e22337265ed"}, - {file = "websockets-15.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2034693ad3097d5355bfdacfffcbd3ef5694f9718ab7f29c29689a9eae841880"}, - {file = "websockets-15.0.1-cp39-cp39-win32.whl", hash = "sha256:3b1ac0d3e594bf121308112697cf4b32be538fb1444468fb0a6ae4feebc83411"}, - {file = "websockets-15.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:b7643a03db5c95c799b89b31c036d5f27eeb4d259c798e878d6937d71832b1e4"}, - {file = "websockets-15.0.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0c9e74d766f2818bb95f84c25be4dea09841ac0f734d1966f415e4edfc4ef1c3"}, - {file = "websockets-15.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1009ee0c7739c08a0cd59de430d6de452a55e42d6b522de7aa15e6f67db0b8e1"}, - {file = "websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76d1f20b1c7a2fa82367e04982e708723ba0e7b8d43aa643d3dcd404d74f1475"}, - {file = "websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f29d80eb9a9263b8d109135351caf568cc3f80b9928bccde535c235de55c22d9"}, - {file = "websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b359ed09954d7c18bbc1680f380c7301f92c60bf924171629c5db97febb12f04"}, - {file = "websockets-15.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:cad21560da69f4ce7658ca2cb83138fb4cf695a2ba3e475e0559e05991aa8122"}, - {file = "websockets-15.0.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7f493881579c90fc262d9cdbaa05a6b54b3811c2f300766748db79f098db9940"}, - {file = "websockets-15.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:47b099e1f4fbc95b701b6e85768e1fcdaf1630f3cbe4765fa216596f12310e2e"}, - {file = 
"websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67f2b6de947f8c757db2db9c71527933ad0019737ec374a8a6be9a956786aaf9"}, - {file = "websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d08eb4c2b7d6c41da6ca0600c077e93f5adcfd979cd777d747e9ee624556da4b"}, - {file = "websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b826973a4a2ae47ba357e4e82fa44a463b8f168e1ca775ac64521442b19e87f"}, - {file = "websockets-15.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:21c1fa28a6a7e3cbdc171c694398b6df4744613ce9b36b1a498e816787e28123"}, - {file = "websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f"}, - {file = "websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee"}, + {file = "websockets-16.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:04cdd5d2d1dacbad0a7bf36ccbcd3ccd5a30ee188f2560b7a62a30d14107b31a"}, + {file = "websockets-16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8ff32bb86522a9e5e31439a58addbb0166f0204d64066fb955265c4e214160f0"}, + {file = "websockets-16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:583b7c42688636f930688d712885cf1531326ee05effd982028212ccc13e5957"}, + {file = "websockets-16.0-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7d837379b647c0c4c2355c2499723f82f1635fd2c26510e1f587d89bc2199e72"}, + {file = "websockets-16.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:df57afc692e517a85e65b72e165356ed1df12386ecb879ad5693be08fac65dde"}, + {file = "websockets-16.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2b9f1e0d69bc60a4a87349d50c09a037a2607918746f07de04df9e43252c77a3"}, + {file = 
"websockets-16.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:335c23addf3d5e6a8633f9f8eda77efad001671e80b95c491dd0924587ece0b3"}, + {file = "websockets-16.0-cp310-cp310-win32.whl", hash = "sha256:37b31c1623c6605e4c00d466c9d633f9b812ea430c11c8a278774a1fde1acfa9"}, + {file = "websockets-16.0-cp310-cp310-win_amd64.whl", hash = "sha256:8e1dab317b6e77424356e11e99a432b7cb2f3ec8c5ab4dabbcee6add48f72b35"}, + {file = "websockets-16.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:31a52addea25187bde0797a97d6fc3d2f92b6f72a9370792d65a6e84615ac8a8"}, + {file = "websockets-16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:417b28978cdccab24f46400586d128366313e8a96312e4b9362a4af504f3bbad"}, + {file = "websockets-16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:af80d74d4edfa3cb9ed973a0a5ba2b2a549371f8a741e0800cb07becdd20f23d"}, + {file = "websockets-16.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:08d7af67b64d29823fed316505a89b86705f2b7981c07848fb5e3ea3020c1abe"}, + {file = "websockets-16.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7be95cfb0a4dae143eaed2bcba8ac23f4892d8971311f1b06f3c6b78952ee70b"}, + {file = "websockets-16.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d6297ce39ce5c2e6feb13c1a996a2ded3b6832155fcfc920265c76f24c7cceb5"}, + {file = "websockets-16.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1c1b30e4f497b0b354057f3467f56244c603a79c0d1dafce1d16c283c25f6e64"}, + {file = "websockets-16.0-cp311-cp311-win32.whl", hash = "sha256:5f451484aeb5cafee1ccf789b1b66f535409d038c56966d6101740c1614b86c6"}, + {file = "websockets-16.0-cp311-cp311-win_amd64.whl", hash = "sha256:8d7f0659570eefb578dacde98e24fb60af35350193e4f56e11190787bee77dac"}, + {file = "websockets-16.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:71c989cbf3254fbd5e84d3bff31e4da39c43f884e64f2551d14bb3c186230f00"}, + {file = 
"websockets-16.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:8b6e209ffee39ff1b6d0fa7bfef6de950c60dfb91b8fcead17da4ee539121a79"}, + {file = "websockets-16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:86890e837d61574c92a97496d590968b23c2ef0aeb8a9bc9421d174cd378ae39"}, + {file = "websockets-16.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9b5aca38b67492ef518a8ab76851862488a478602229112c4b0d58d63a7a4d5c"}, + {file = "websockets-16.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e0334872c0a37b606418ac52f6ab9cfd17317ac26365f7f65e203e2d0d0d359f"}, + {file = "websockets-16.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a0b31e0b424cc6b5a04b8838bbaec1688834b2383256688cf47eb97412531da1"}, + {file = "websockets-16.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:485c49116d0af10ac698623c513c1cc01c9446c058a4e61e3bf6c19dff7335a2"}, + {file = "websockets-16.0-cp312-cp312-win32.whl", hash = "sha256:eaded469f5e5b7294e2bdca0ab06becb6756ea86894a47806456089298813c89"}, + {file = "websockets-16.0-cp312-cp312-win_amd64.whl", hash = "sha256:5569417dc80977fc8c2d43a86f78e0a5a22fee17565d78621b6bb264a115d4ea"}, + {file = "websockets-16.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:878b336ac47938b474c8f982ac2f7266a540adc3fa4ad74ae96fea9823a02cc9"}, + {file = "websockets-16.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:52a0fec0e6c8d9a784c2c78276a48a2bdf099e4ccc2a4cad53b27718dbfd0230"}, + {file = "websockets-16.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e6578ed5b6981005df1860a56e3617f14a6c307e6a71b4fff8c48fdc50f3ed2c"}, + {file = "websockets-16.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:95724e638f0f9c350bb1c2b0a7ad0e83d9cc0c9259f3ea94e40d7b02a2179ae5"}, + {file = "websockets-16.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:c0204dc62a89dc9d50d682412c10b3542d748260d743500a85c13cd1ee4bde82"}, + {file = "websockets-16.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:52ac480f44d32970d66763115edea932f1c5b1312de36df06d6b219f6741eed8"}, + {file = "websockets-16.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6e5a82b677f8f6f59e8dfc34ec06ca6b5b48bc4fcda346acd093694cc2c24d8f"}, + {file = "websockets-16.0-cp313-cp313-win32.whl", hash = "sha256:abf050a199613f64c886ea10f38b47770a65154dc37181bfaff70c160f45315a"}, + {file = "websockets-16.0-cp313-cp313-win_amd64.whl", hash = "sha256:3425ac5cf448801335d6fdc7ae1eb22072055417a96cc6b31b3861f455fbc156"}, + {file = "websockets-16.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:8cc451a50f2aee53042ac52d2d053d08bf89bcb31ae799cb4487587661c038a0"}, + {file = "websockets-16.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:daa3b6ff70a9241cf6c7fc9e949d41232d9d7d26fd3522b1ad2b4d62487e9904"}, + {file = "websockets-16.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:fd3cb4adb94a2a6e2b7c0d8d05cb94e6f1c81a0cf9dc2694fb65c7e8d94c42e4"}, + {file = "websockets-16.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:781caf5e8eee67f663126490c2f96f40906594cb86b408a703630f95550a8c3e"}, + {file = "websockets-16.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:caab51a72c51973ca21fa8a18bd8165e1a0183f1ac7066a182ff27107b71e1a4"}, + {file = "websockets-16.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:19c4dc84098e523fd63711e563077d39e90ec6702aff4b5d9e344a60cb3c0cb1"}, + {file = "websockets-16.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:a5e18a238a2b2249c9a9235466b90e96ae4795672598a58772dd806edc7ac6d3"}, + {file = "websockets-16.0-cp314-cp314-win32.whl", hash = "sha256:a069d734c4a043182729edd3e9f247c3b2a4035415a9172fd0f1b71658a320a8"}, + {file = "websockets-16.0-cp314-cp314-win_amd64.whl", hash = 
"sha256:c0ee0e63f23914732c6d7e0cce24915c48f3f1512ec1d079ed01fc629dab269d"}, + {file = "websockets-16.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:a35539cacc3febb22b8f4d4a99cc79b104226a756aa7400adc722e83b0d03244"}, + {file = "websockets-16.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:b784ca5de850f4ce93ec85d3269d24d4c82f22b7212023c974c401d4980ebc5e"}, + {file = "websockets-16.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:569d01a4e7fba956c5ae4fc988f0d4e187900f5497ce46339c996dbf24f17641"}, + {file = "websockets-16.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:50f23cdd8343b984957e4077839841146f67a3d31ab0d00e6b824e74c5b2f6e8"}, + {file = "websockets-16.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:152284a83a00c59b759697b7f9e9cddf4e3c7861dd0d964b472b70f78f89e80e"}, + {file = "websockets-16.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:bc59589ab64b0022385f429b94697348a6a234e8ce22544e3681b2e9331b5944"}, + {file = "websockets-16.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:32da954ffa2814258030e5a57bc73a3635463238e797c7375dc8091327434206"}, + {file = "websockets-16.0-cp314-cp314t-win32.whl", hash = "sha256:5a4b4cc550cb665dd8a47f868c8d04c8230f857363ad3c9caf7a0c3bf8c61ca6"}, + {file = "websockets-16.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b14dc141ed6d2dde437cddb216004bcac6a1df0935d79656387bd41632ba0bbd"}, + {file = "websockets-16.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:349f83cd6c9a415428ee1005cadb5c2c56f4389bc06a9af16103c3bc3dcc8b7d"}, + {file = "websockets-16.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:4a1aba3340a8dca8db6eb5a7986157f52eb9e436b74813764241981ca4888f03"}, + {file = "websockets-16.0-pp311-pypy311_pp73-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f4a32d1bd841d4bcbffdcb3d2ce50c09c3909fbead375ab28d0181af89fd04da"}, + {file 
= "websockets-16.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0298d07ee155e2e9fda5be8a9042200dd2e3bb0b8a38482156576f863a9d457c"}, + {file = "websockets-16.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:a653aea902e0324b52f1613332ddf50b00c06fdaf7e92624fbf8c77c78fa5767"}, + {file = "websockets-16.0-py3-none-any.whl", hash = "sha256:1637db62fad1dc833276dded54215f2c7fa46912301a24bd94d45d46a011ceec"}, + {file = "websockets-16.0.tar.gz", hash = "sha256:5f6261a5e56e8d5c42a4497b364ea24d94d9563e8fbd44e78ac40879c60179b5"}, +] + +[[package]] +name = "widgetsnbextension" +version = "4.0.15" +description = "Jupyter interactive widgets for Jupyter Notebook" +optional = false +python-versions = ">=3.7" +groups = ["vectorbt"] +files = [ + {file = "widgetsnbextension-4.0.15-py3-none-any.whl", hash = "sha256:8156704e4346a571d9ce73b84bee86a29906c9abfd7223b7228a28899ccf3366"}, + {file = "widgetsnbextension-4.0.15.tar.gz", hash = "sha256:de8610639996f1567952d763a5a41af8af37f2575a41f9852a38f947eb82a3b9"}, ] [[package]] @@ -2917,4 +4177,4 @@ repair = ["scipy (>=1.6.3)"] [metadata] lock-version = "2.1" python-versions = "^3.12" -content-hash = "823a0509d46fae636725235f5d8b75249d25dde2528c225c7c2e50a3375eb0f0" +content-hash = "bfe1c24f602b59bc4dd37e07c5905c37a7cccf260f6ea7756e7d472be0a96663" diff --git a/pyproject.toml b/pyproject.toml index 08d69c9bfc..75f97ce8df 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "ziplime" -version = "1.11.11" +version = "1.13.19" description = "" authors = ["Ziplime"] readme = "README.md" @@ -37,9 +37,13 @@ structlog = "^25.4.0" tabulate = "^0.9.0" python-dateutil = "^2.9.0.post0" yfinance = "^0.2.65" -protobuf = "^6.32.0" -grpcio = "^1.74.0" -googleapis-common-protos = "^1.70.0" + +[tool.poetry.group.vectorbt] +optional = true + +[tool.poetry.group.vectorbt.dependencies] +vectorbt = "0.28.2" + [tool.poetry.group.docs.dependencies] mkdocs = {extras = 
["i18n"], version = "^1.6.1"} diff --git a/ziplime/assets/entities/asset.py b/ziplime/assets/entities/asset.py index 1296183a2e..02d6c0ef0d 100644 --- a/ziplime/assets/entities/asset.py +++ b/ziplime/assets/entities/asset.py @@ -15,3 +15,6 @@ class Asset: @abstractmethod def get_symbol_by_exchange(self, exchange_name: str | None) -> str | None: ... + + def __hash__(self): + return hash(self.sid) diff --git a/ziplime/assets/repositories/adjustments_repository.py b/ziplime/assets/repositories/adjustments_repository.py index 0b0546dad6..49c3703ebb 100644 --- a/ziplime/assets/repositories/adjustments_repository.py +++ b/ziplime/assets/repositories/adjustments_repository.py @@ -7,9 +7,11 @@ class AdjustmentRepository: - def get_splits(self, assets: frozenset[Asset], dt: datetime.date): ... + async def get_splits(self, assets: frozenset[Asset], dt: datetime.date): ... - def get_stock_dividends(self, sid: int, trading_days: pl.Series) -> list[Dividend]: ... + async def get_stock_dividends(self, sid: int, trading_days: pl.Series) -> list[Dividend]: ... + + async def load_pricing_adjustments(self, columns, dates, assets): ... def to_json(self): ... diff --git a/ziplime/assets/repositories/asset_repository.py b/ziplime/assets/repositories/asset_repository.py index 540a8c59c9..6a51a38d51 100644 --- a/ziplime/assets/repositories/asset_repository.py +++ b/ziplime/assets/repositories/asset_repository.py @@ -1,4 +1,6 @@ from typing import Any, Self + +import pandas as pd import polars as pl import aiocache from aiocache import Cache @@ -40,6 +42,9 @@ async def get_equity_by_symbol(self, symbol: str, exchange_name: str) -> Equity async def get_equities_by_symbols(self, symbols: list[str]) -> list[Equity]: ... async def get_equities_by_symbols_and_exchange(self, symbols: list[str], exchange_name: str) -> list[Equity]: ... async def get_symbols_universe(self, symbol: str) -> SymbolsUniverse | None: ... 
+ async def lifetimes(self, dates: pd.DatetimeIndex, include_start_date: bool, country_codes: list[str]): ... + async def asset_lifetimes(self, assets:list[Asset], dates: pd.DatetimeIndex, include_start_date: bool): ... + async def retrieve_all(self, sids: list[int], default_none: bool = False): ... @aiocache.cached(cache=Cache.MEMORY) async def get_asset_by_symbol(self, symbol: str, asset_type: AssetType, exchange_name: str) -> Asset | None: diff --git a/ziplime/assets/repositories/sqlalchemy_adjustments_repository.py b/ziplime/assets/repositories/sqlalchemy_adjustments_repository.py index f6847a6d53..fd78983299 100644 --- a/ziplime/assets/repositories/sqlalchemy_adjustments_repository.py +++ b/ziplime/assets/repositories/sqlalchemy_adjustments_repository.py @@ -1,18 +1,22 @@ import datetime -import logging import sqlite3 from collections import namedtuple from functools import lru_cache -from pathlib import Path +from itertools import chain from typing import Self, Any import polars as pl import numpy as np import pandas as pd import structlog from numpy import integer as any_integer +from sqlalchemy import select from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_async_engine from ziplime.assets.entities.asset import Asset +from ziplime.assets.models.dividend import Dividend +from ziplime.assets.models.merger import Merger +from ziplime.assets.models.split import Split +from ziplime.lib.adjustment import Float64Multiply from ziplime.utils.functional import keysorted from ziplime.utils.numpy_utils import ( datetime64ns_dtype, @@ -21,10 +25,10 @@ uint32_dtype, uint64_dtype, ) -from ziplime.utils.pandas_utils import empty_dataframe -from ziplime.utils.sqlite_utils import group_into_chunks +from ziplime.utils.pandas_utils import empty_dataframe, timedelta_to_integral_seconds +from ziplime.utils.sqlite_utils import group_into_chunks, SQLITE_MAX_VARIABLE_NUMBER -from ziplime.data.adjustments import load_adjustments_from_sqlite +from 
ziplime.data.adjustments import _lookup_dt, EPOCH, ADJ_QUERY_TEMPLATE, SID_QUERIES from ziplime.assets.repositories.adjustments_repository import AdjustmentRepository @@ -39,7 +43,7 @@ AND sid IN ({0}) \ """ -Dividend = namedtuple("Dividend", ["asset", "amount", "pay_date"]) +# Dividend = namedtuple("Dividend", ["asset", "amount", "pay_date"]) UNPAID_STOCK_DIVIDEND_QUERY_TEMPLATE = """ SELECT sid, payment_sid, ratio, pay_date @@ -151,7 +155,274 @@ def session_maker(self) -> async_sessionmaker[AsyncSession]: expire_on_commit=False) return session_maker - def load_adjustments( + async def _get_sids_from_table(db, + tablename: str, + start_date: int, + end_date: int) -> set: + """Get the unique sids for all adjustments between start_date and end_date + from table `tablename`. + + Parameters + ---------- + db : sqlite3.connection + tablename : str + start_date : int (seconds since epoch) + end_date : int (seconds since epoch) + + Returns + ------- + sids : set + Set of sets + """ + + cursor = db.execute( + SID_QUERIES[tablename], + (start_date, end_date), + ) + out = set() + for result in cursor.fetchall(): + out.add(result[0]) + return out + + async def _get_split_sids(self, db: AsyncSession, start_date: int, end_date: int) -> set: + # return await self._get_sids_from_table(db, 'splits', start_date, end_date) + q = select(Split.sid).filter(Split.effective_date >= start_date, Split.effective_date <= end_date).distinct() + result = set((await db.execute(q)).scalars()) + return result + + async def _get_merger_sids(self, db: AsyncSession, start_date: int, end_date: int) -> set: + q = select(Merger.sid).filter(Merger.effective_date >= start_date, Merger.effective_date <= end_date).distinct() + result = set((await db.execute(q)).scalars()) + return result + + # return await self._get_sids_from_table(db, 'mergers', start_date, end_date) + + async def _get_dividend_sids(self, db: AsyncSession, start_date: int, end_date: int) -> set: + + """ + SELECT DISTINCT sid FROM {0} + 
WHERE effective_date >= ? AND effective_date <= ? + """ + q = select(Dividend.sid).filter(Dividend.effective_date >= start_date, Dividend.effective_date <= end_date).distinct() + result = set((await db.execute(q)).scalars()) + return result + # return await self._get_sids_from_table(db, 'dividends', start_date, end_date) + + async def _adjustments(self, + adjustments_db: AsyncSession, + split_sids: set, + merger_sids: set, + dividends_sids: set, + start_date: int, + end_date: int, + assets: pd.Index): + + splits_to_query = [str(a) for a in assets if a in split_sids] + splits_results = [] + while splits_to_query: + query_len = min(len(splits_to_query), SQLITE_MAX_VARIABLE_NUMBER) + query_assets = splits_to_query[:query_len] + t = [str(a) for a in query_assets] + statement = ADJ_QUERY_TEMPLATE.format( + 'splits', + ",".join(['?' for _ in query_assets]), + start_date, + end_date, + ) + c.execute(statement, t) + splits_to_query = splits_to_query[query_len:] + splits_results.extend(c.fetchall()) + + mergers_to_query = [str(a) for a in assets if a in merger_sids] + mergers_results = [] + while mergers_to_query: + query_len = min(len(mergers_to_query), SQLITE_MAX_VARIABLE_NUMBER) + query_assets = mergers_to_query[:query_len] + t = [str(a) for a in query_assets] + statement = ADJ_QUERY_TEMPLATE.format( + 'mergers', + ",".join(['?' for _ in query_assets]), + start_date, + end_date, + ) + c.execute(statement, t) + mergers_to_query = mergers_to_query[query_len:] + mergers_results.extend(c.fetchall()) + + dividends_to_query = [str(a) for a in assets if a in dividends_sids] + dividends_results = [] + while dividends_to_query: + query_len = min(len(dividends_to_query), SQLITE_MAX_VARIABLE_NUMBER) + query_assets = dividends_to_query[:query_len] + t = [str(a) for a in query_assets] + statement = ADJ_QUERY_TEMPLATE.format( + 'dividends', + ",".join(['?' 
for _ in query_assets]), + start_date, + end_date, + ) + c.execute(statement, t) + dividends_to_query = dividends_to_query[query_len:] + dividends_results.extend(c.fetchall()) + + return splits_results, mergers_results, dividends_results + + async def load_adjustments_from_sqlite(self, + db_session: AsyncSession, + dates: pd.DatetimeIndex, + assets: pd.Index, + should_include_splits: bool, + should_include_mergers: bool, + should_include_dividends: bool, + adjustment_type: str): + """Load a dictionary of Adjustment objects from adjustments_db. + + Parameters + ---------- + adjustments_db : sqlite3.Connection + Connection to a sqlite3 table in the format written by + SQLiteAdjustmentWriter. + dates : pd.DatetimeIndex + Dates for which adjustments are needed. + assets : pd.Int64Index + Assets for which adjustments are needed. + should_include_splits : bool + Whether split adjustments should be included. + should_include_mergers : bool + Whether merger adjustments should be included. + should_include_dividends : bool + Whether dividend adjustments should be included. + adjustment_type : str + Whether price adjustments, volume adjustments, or both, should be + included in the output. + + Returns + ------- + adjustments : dict[str -> dict[int -> Adjustment]] + A dictionary containing price and/or volume adjustment mappings from + index to adjustment objects to apply at that index. 
+ """ + + if not (adjustment_type == 'price' or + adjustment_type == 'volume' or + adjustment_type == 'all'): + raise ValueError( + "%s is not a valid adjustment type.\n" + "Valid adjustment types are 'price', 'volume', and 'all'.\n" % ( + adjustment_type, + ) + ) + + should_include_price_adjustments = bool( + adjustment_type == 'all' or adjustment_type == 'price' + ) + should_include_volume_adjustments = bool( + adjustment_type == 'all' or adjustment_type == 'volume' + ) + + if not should_include_price_adjustments: + should_include_mergers = False + should_include_dividends = False + + start_date = dates[0].to_pydatetime().date() + end_date = dates[-1].to_pydatetime().date() + # TODO: localize dates for adjustments + # start_date = dates[0].tz_localize(self.trading_calendar.tz).to_pydatetime().date() + # end_date = dates[-1].tz_localize(self.trading_calendar.tz).to_pydatetime().date() + + if should_include_splits: + split_sids = await self._get_split_sids( + db_session, + start_date, + end_date, + ) + else: + split_sids = set() + + if should_include_mergers: + merger_sids = await self._get_merger_sids( + db_session, + start_date, + end_date, + ) + else: + merger_sids = set() + + if should_include_dividends: + dividend_sids = await self._get_dividend_sids( + db_session, + start_date, + end_date, + ) + else: + dividend_sids = set() + + splits, mergers, dividends = await self._adjustments( + db_session, + split_sids, + merger_sids, + dividend_sids, + start_date, + end_date, + assets, + ) + + price_adjustments = {} + volume_adjustments = {} + result = {} + asset_ixs = {} # Cache sid lookups here. + date_ixs = {} + + _dates_seconds = \ + dates.values.astype('datetime64[s]').view(np.int64) + + # Pre-populate date index cache. 
+ for i, dt in enumerate(_dates_seconds): + date_ixs[dt] = i + + # splits affect prices and volumes, volumes is the inverse + for sid, ratio, eff_date in splits: + if eff_date < start_date: + continue + + date_loc = _lookup_dt(date_ixs, eff_date, _dates_seconds) + + if sid not in asset_ixs: + asset_ixs[sid] = assets.get_loc(sid) + asset_ix = asset_ixs[sid] + + if should_include_price_adjustments: + price_adj = Float64Multiply(0, date_loc, asset_ix, asset_ix, ratio) + price_adjustments.setdefault(date_loc, []).append(price_adj) + + if should_include_volume_adjustments: + volume_adj = Float64Multiply( + 0, date_loc, asset_ix, asset_ix, 1.0 / ratio + ) + volume_adjustments.setdefault(date_loc, []).append(volume_adj) + + # mergers and dividends affect prices only + for sid, ratio, eff_date in chain(mergers, dividends): + if eff_date < start_date: + continue + + date_loc = _lookup_dt(date_ixs, eff_date, _dates_seconds) + + if sid not in asset_ixs: + asset_ixs[sid] = assets.get_loc(sid) + asset_ix = asset_ixs[sid] + + price_adj = Float64Multiply(0, date_loc, asset_ix, asset_ix, ratio) + price_adjustments.setdefault(date_loc, []).append(price_adj) + + if should_include_price_adjustments: + result['price'] = price_adjustments + if should_include_volume_adjustments: + result['volume'] = volume_adjustments + + return result + + async def load_adjustments( self, dates, assets, @@ -185,17 +456,19 @@ def load_adjustments( from index to adjustment objects to apply at that index. 
""" dates = dates.tz_localize("UTC") - return load_adjustments_from_sqlite( - self.conn, - dates, - assets, - should_include_splits, - should_include_mergers, - should_include_dividends, - adjustment_type, - ) - def load_pricing_adjustments(self, columns, dates, assets): + async with self.session_maker() as session: + return await self.load_adjustments_from_sqlite( + session, + dates, + assets, + should_include_splits, + should_include_mergers, + should_include_dividends, + adjustment_type, + ) + + async def load_pricing_adjustments(self, columns, dates, assets): if "volume" not in set(columns): adjustment_type = "price" elif len(set(columns)) == 1: @@ -203,7 +476,7 @@ def load_pricing_adjustments(self, columns, dates, assets): else: adjustment_type = "all" - adjustments = self.load_adjustments( + adjustments = await self.load_adjustments( dates, assets, should_include_splits=True, @@ -233,7 +506,7 @@ def get_adjustments_for_sid(self, table_name, sid): for adjustment in adjustments_for_sid ] - def get_dividends_with_ex_date(self, assets, date, asset_finder): + def get_dividends_with_ex_date(self, assets, date): # seconds = date.value / int(1e9) return [] c = self.conn.cursor() @@ -257,10 +530,10 @@ def get_dividends_with_ex_date(self, assets, date, asset_finder): return divs - def get_stock_dividends(self, sid: int, trading_days: pl.Series) -> list[Dividend]: + async def get_stock_dividends(self, sid: int, trading_days: pl.Series) -> list[Dividend]: return [] - def get_stock_dividends_with_ex_date(self, assets, date, asset_finder): + async def get_stock_dividends_with_ex_date(self, assets, date): # seconds = date.value / int(1e9) return [] @@ -672,7 +945,7 @@ def write(self, splits=None, mergers=None, dividends=None, stock_dividends=None) self.write_frame("mergers", mergers) self.write_dividend_data(dividends, stock_dividends) - def get_splits(self, assets: frozenset[Asset], dt: datetime.date): + async def get_splits(self, assets: frozenset[Asset], dt: 
datetime.date): """Returns any splits for the given sids and the given dt. Parameters diff --git a/ziplime/assets/repositories/sqlalchemy_asset_repository.py b/ziplime/assets/repositories/sqlalchemy_asset_repository.py index b3d3195b01..96cbcfcbc1 100644 --- a/ziplime/assets/repositories/sqlalchemy_asset_repository.py +++ b/ziplime/assets/repositories/sqlalchemy_asset_repository.py @@ -1,8 +1,6 @@ import datetime -import os from collections import deque from functools import partial, lru_cache -from numbers import Integral from operator import attrgetter from pathlib import Path from typing import Any, Self @@ -45,7 +43,6 @@ SameSymbolUsedAcrossCountries, SidsNotFound, SymbolNotFound, - NotAssetConvertible, ) from ziplime.utils.functional import invert from ziplime.utils.numpy_utils import as_column @@ -105,15 +102,6 @@ def __init__(self, db_url: str, future_chain_predicates): self._asset_lifetimes = {} self.migrate() - # def get_database_path(self, bundle_name: str, bundle_version: str) -> Path: - # return Path(self._base_storage_path, "assets", f"{bundle_name}_{bundle_version}.sqlite") - - # def get_database_url_sync(self, bundle_name: str, bundle_version: str) -> str: - # return f"sqlite:////{self.get_database_path(bundle_name=bundle_name, bundle_version=bundle_version)}" - # - # def get_database_url_async(self, bundle_name: str, bundle_version: str) -> str: - # return f"sqlite+aiosqlite:////{self.get_database_path(bundle_name=bundle_name, bundle_version=bundle_version)}" - @property @lru_cache def session_maker(self) -> async_sessionmaker[AsyncSession]: @@ -213,6 +201,13 @@ async def get_exchange_by_name(self, exchange_name: str) -> ExchangeInfo | None: exchange = (await session.execute(q)).scalar_one_or_none() return exchange + @aiocache.cached(cache=Cache.MEMORY) + async def get_exchanges_by_country_codes(self, country_codes: frozenset[str]) -> list[ExchangeInfo]: + async with self.session_maker() as session: + q = 
select(ExchangeInfo).where(ExchangeInfo.country_code.in_(country_codes)) + exchanges = list((await session.execute(q)).scalars()) + return exchanges + async def save_equities(self, equities: list[Equity]) -> None: assets_db = [] asset_routers = [] @@ -338,7 +333,27 @@ async def get_asset_by_sid(self, sid: int) -> AssetModel | None: async def get_assets_by_sids(self, sids: list[int]) -> list[Asset]: assets_by_sid = await self.get_all_assets() - return [assets_by_sid.get(sid, None) for sid in sids] + assets = [assets_by_sid.get(sid, None) for sid in sids] + return [Equity( + sid=asset.sid, + asset_name=asset.asset_name, + start_date=asset.start_date, + first_traded=asset.first_traded, + end_date=asset.end_date, + auto_close_date=asset.auto_close_date, + symbol_mapping={ + equity_mapping.exchange: EquitySymbolMapping( + company_symbol=equity_mapping.company_symbol, + symbol=equity_mapping.symbol, + exchange_name=equity_mapping.exchange, + share_class_symbol=equity_mapping.share_class_symbol, + end_date=equity_mapping.end_date, + start_date=equity_mapping.start_date + ) + for equity_mapping in asset.equity_symbol_mappings + }, + mic=asset.mic + ) for asset in assets] async def get_symbols_universe(self, symbol: str) -> SymbolsUniverse | None: universes_by_symbol = await self.get_all_universes() @@ -465,38 +480,6 @@ async def get_equity_by_symbol(self, symbol: str, exchange_name: str) -> Equity if equities: return equities[0] return None - # async with self.session_maker() as session: - # q_equity_symbol_mapping = select(EquitySymbolMappingModel).where( - # EquitySymbolMappingModel.exchange == exchange_name, - # EquitySymbolMappingModel.symbol == symbol) - # equity_mapping = (await session.execute(q_equity_symbol_mapping)).scalar_one_or_none() - # if equity_mapping is None: - # return None - # q_equity = select(EquityModel).where(EquityModel.sid == equity_mapping.sid).options( - # 
selectinload(EquityModel.asset_router)).options(selectinload(EquityModel.equity_symbol_mappings)) - # asset: EquityModel = (await session.execute(q_equity)).scalar_one_or_none() - # - # if asset is None: - # return None - # return Equity( - # sid=asset.sid, - # asset_name=asset.asset_name, - # start_date=asset.start_date, - # first_traded=asset.first_traded, - # end_date=asset.end_date, - # auto_close_date=asset.auto_close_date, - # symbol_mapping={ - # equity_mapping.exchange: EquitySymbolMapping( - # company_symbol=equity_mapping.company_symbol, - # symbol=equity_mapping.symbol, - # exchange_name=equity_mapping.exchange, - # share_class_symbol=equity_mapping.share_class_symbol, - # end_date=equity_mapping.end_date, - # start_date=equity_mapping.start_date - # ) - # for equity_mapping in asset.equity_symbol_mappings - # } - # ) def migrate(self) -> None: alembic_dir_path = Path(pathlib.Path(__file__).parent.parent.parent, "alembic") @@ -633,7 +616,7 @@ async def retrieve_all(self, sids: list[int], default_none: bool = False): """ async with self.session_maker() as session: - q = select(AssetModel).where(AssetModel.sid.in_(sids)) + q = select(AssetRouter).where(AssetModel.sid.in_(sids)) assets = (await session.execute(q)).scalars() return list(assets) @@ -1054,57 +1037,6 @@ def lookup_symbol(self, symbol: str, as_of_date: datetime.datetime, as_of_date, ) - # def lookup_symbols(self, symbols: list[str], as_of_date: datetime.datetime, - # country_code: str | None = None): - # """Lookup a list of equities by symbol. - # - # Equivalent to:: - # - # [finder.lookup_symbol(s, as_of, fuzzy) for s in symbols] - # - # but potentially faster because repeated lookups are memoized. - # - # Parameters - # ---------- - # symbols : sequence[str] - # Sequence of ticker symbols to resolve. - # as_of_date : datetime.datetime - # Forwarded to ``lookup_symbol``. - # country_code : str or None, optional - # The country to limit searches to. 
If not provided, the search will - # span all countries which increases the likelihood of an ambiguous - # lookup. - # - # Returns - # ------- - # equities : list[Equity] - # """ - # if not symbols: - # return [] - # - # multi_country = country_code is None - # f = self._lookup_symbol_strict - # mapping = self._choose_symbol_ownership_map(country_code) - # - # if mapping is None: - # raise SymbolNotFound(symbol=symbols[0]) - # - # memo = {} - # out = [] - # append_output = out.append - # for sym in symbols: - # if sym in memo: - # append_output(memo[sym]) - # else: - # equity = memo[sym] = f( - # mapping, - # multi_country, - # sym, - # as_of_date, - # ) - # append_output(equity) - # return out - def lookup_future_symbol(self, symbol: str): """Lookup a future contract by symbol. @@ -1138,102 +1070,6 @@ def lookup_future_symbol(self, symbol: str): raise SymbolNotFound(symbol=symbol) return self.retrieve_asset(sid=data["sid"]) - # def lookup_by_supplementary_field(self, field_name: str, value: float, as_of_date: datetime.datetime): - # try: - # owners = self.equity_supplementary_map[ - # field_name, - # value, - # ] - # assert owners, "empty owners list for field %r (sid: %r)" % ( - # field_name, - # value, - # ) - # except KeyError as exc: - # # no equity has ever held this value - # raise ValueNotFoundForField(field=field_name, value=value) from exc - # - # if not as_of_date: - # if len(owners) > 1: - # # more than one equity has held this value, this is ambigious - # # without the date - # raise MultipleValuesFoundForField( - # field=field_name, - # value=value, - # options=set( - # map( - # compose(self.retrieve_asset, attrgetter("sid")), - # owners, - # ) - # ), - # ) - # # exactly one equity has ever held this value, we may resolve - # # without the date - # return self.retrieve_asset(owners[0].sid) - # - # for start, end, sid, _ in owners: - # if start <= as_of_date < end: - # # find the equity that owned it on the given asof date - # return 
self.retrieve_asset(sid) - # - # # no equity held the value on the given asof date - # raise ValueNotFoundForField(field=field_name, value=value) - - # def get_supplementary_field(self, sid: int, field_name: str, as_of_date: datetime.datetime): - # """Get the value of a supplementary field for an asset. - # - # Parameters - # ---------- - # sid : int - # The sid of the asset to query. - # field_name : str - # Name of the supplementary field. - # as_of_date : datetime.datetime, None - # The last known value on this date is returned. If None, a - # value is returned only if we've only ever had one value for - # this sid. If None and we've had multiple values, - # MultipleValuesFoundForSid is raised. - # - # Raises - # ------ - # NoValueForSid - # If we have no values for this asset, or no values was known - # on this as_of_date. - # MultipleValuesFoundForSid - # If we have had multiple values for this asset over time, and - # None was passed for as_of_date. - # """ - # try: - # periods = self.equity_supplementary_map_by_sid[ - # field_name, - # sid, - # ] - # assert periods, "empty periods list for field %r and sid %r" % ( - # field_name, - # sid, - # ) - # except KeyError: - # raise NoValueForSid(field=field_name, sid=sid) from KeyError - # - # if not as_of_date: - # if len(periods) > 1: - # # This equity has held more than one value, this is ambigious - # # without the date - # raise MultipleValuesFoundForSid( - # field=field_name, - # sid=sid, - # options={p.value for p in periods}, - # ) - # # this equity has only ever held this value, we may resolve - # # without the date - # return periods[0].value - # - # for start, end, _, value in periods: - # if start <= as_of_date < end: - # return value - # - # # Could not find a value for this sid on the as_of_date. 
- # raise NoValueForSid(field=field_name, sid=sid) - def _get_contract_sids(self, root_symbol: str): fc_cols = self.futures_contracts.c with self.engine.connect() as conn: @@ -1314,96 +1150,95 @@ def create_continuous_future(self, root_symbol: str, offset: int, roll_style: st return {None: cf, "mul": mul_cf, "add": add_cf}[adjustment] - # - # def _get_sids(self, tblattr: str) -> list[int]: - # with self.engine.connect() as conn: - # return list(( - # conn.execute(sa.select(getattr(self, tblattr).c.sid)) - # .scalars() - # .fetchall() - # )) - - # @property - # def sids(self) -> list[int]: - # return self._get_sids("asset_router") - - def _lookup_generic_scalar(self, obj: AssetModel, as_of_date: datetime.datetime, country_code: str, - matches: list[AssetModel], - missing: list[AssetModel]): - """ - Convert asset_convertible to an asset. + @aiocache.cached(cache=Cache.MEMORY) + async def _compute_lifetimes(self, country_codes: frozenset[str]) -> Lifetimes: + """Compute and cache a recarray of asset lifetimes""" + sids = starts = ends = [] + async with self.session_maker() as session: + sids_subquery = select(EquitySymbolMappingModel.sid).join( + ExchangeInfo, onclause=ExchangeInfo.exchange == EquitySymbolMappingModel.exchange + ).where(ExchangeInfo.country_code.in_(country_codes)) + q = select( + EquityModel.sid, + EquityModel.start_date, + EquityModel.end_date + ).where(EquityModel.sid.in_(sids_subquery)) + result = list((await session.execute(q))) + if result: + sids, starts, ends = zip(*result) - On success, append to matches. - On failure, append to missing. 
- """ - result = self._lookup_generic_scalar_helper( - obj=obj, - as_of_date=as_of_date, - country_code=country_code, - ) - if result is not None: - matches.append(result) - else: - missing.append(obj) + sid = np.array(sids, dtype="i8") + start = np.array([datetime.datetime.combine(s, datetime.datetime.min.time(), tzinfo=datetime.timezone.utc).timestamp() for s in starts], dtype="f8") + end = np.array([datetime.datetime.combine(s, datetime.datetime.min.time(), tzinfo=datetime.timezone.utc).timestamp() for s in ends], dtype="f8") + start[np.isnan(start)] = 0 # convert missing starts to 0 + end[np.isnan(end)] = np.iinfo(int).max # convert missing end to INTMAX + return Lifetimes(sid, start.astype("i8"), end.astype("i8")) - def _lookup_generic_scalar_helper(self, obj: AssetModel, as_of_date: datetime.datetime, country_code: str): - if isinstance(obj, (AssetModel, ContinuousFuture)): - return obj + async def lifetimes(self, dates: pd.DatetimeIndex, include_start_date: bool, country_codes: list[str]): + """Compute a DataFrame representing asset lifetimes for the specified date + range. - if isinstance(obj, Integral): - try: - return self.retrieve_asset(int(obj)) - except SidsNotFound: - return None + Parameters + ---------- + dates : pd.DatetimeIndex + The dates for which to compute lifetimes. + include_start_date : bool + Whether or not to count the asset as alive on its start_date. - if isinstance(obj, str): - # Try to look up as an equity first. - try: - return self.lookup_symbol( - symbol=obj, as_of_date=as_of_date, country_code=country_code - ) - except SymbolNotFound: - # Fall back to lookup as a Future - try: - # TODO: Support country_code for future_symbols? - return self.lookup_future_symbol(obj) - except SymbolNotFound: - return None + This is useful in a backtesting context where `lifetimes` is being + used to signify "do I have data for this asset as of the morning of + this date?" For many financial metrics, (e.g. 
daily close), data + isn't available for an asset until the end of the asset's first + day. + country_codes : iterable[str] + The country codes to get lifetimes for. + + Returns + ------- + lifetimes : pd.DataFrame + A frame of dtype bool with `dates` as index and an Int64Index of + assets as columns. The value at `lifetimes.loc[date, asset]` will + be True iff `asset` existed on `date`. If `include_start_date` is + False, then lifetimes.loc[date, asset] will be false when date == + asset.start_date. - raise NotAssetConvertible("Input was %s, not AssetConvertible." % obj) + See Also + -------- + numpy.putmask + ziplime.pipeline.engine.SimplePipelineEngine._compute_root_mask + """ + lifetimes = await self._compute_lifetimes(country_codes=frozenset(country_codes)) + return lifetimes - def _compute_asset_lifetimes(self, **kwargs): + @aiocache.cached(cache=Cache.MEMORY) + async def _compute_asset_lifetimes(self, assets: frozenset[Asset]) -> Lifetimes: """Compute and cache a recarray of asset lifetimes""" - sids = starts = ends = [] - equities_cols = self.equities.c - exchanges_cols = self.exchanges.c - if len(kwargs) == 1: - if "country_codes" in kwargs.keys(): - condt = exchanges_cols.country_code.in_(kwargs["country_codes"]) - if "exchange_names" in kwargs.keys(): - condt = exchanges_cols.exchange.in_(kwargs["exchange_names"]) - - with self.engine.connect() as conn: - results = conn.execute( - sa.select( - equities_cols.sid, - equities_cols.start_date, - equities_cols.end_date, - ).where( - (exchanges_cols.exchange == equities_cols.exchange) & (condt) - ) - ).fetchall() - if results: - sids, starts, ends = zip(*results) + # sids = starts = ends = [] + # async with self.session_maker() as session: + # sids_subquery = select(EquitySymbolMappingModel.sid).join( + # ExchangeInfo, onclause=ExchangeInfo.exchange == EquitySymbolMappingModel.exchange + # ).where(ExchangeInfo.country_code.in_(country_codes)) + # q = select( + # EquityModel.sid, + # EquityModel.start_date, + # 
EquityModel.end_date + # ).where(EquityModel.sid.in_(sids_subquery)) + # result = list((await session.execute(q))) + # if result: + # sids, starts, ends = zip(*result) + sids = [asset.sid for asset in assets] + starts = [asset.start_date for asset in assets] + ends = [asset.end_date for asset in assets] sid = np.array(sids, dtype="i8") - start = np.array(starts, dtype="f8") - end = np.array(ends, dtype="f8") + start = np.array([datetime.datetime.combine(s, datetime.datetime.min.time(), tzinfo=datetime.timezone.utc).timestamp() for s in starts], dtype="f8") + end = np.array([datetime.datetime.combine(s, datetime.datetime.min.time(), tzinfo=datetime.timezone.utc).timestamp() for s in ends], dtype="f8") start[np.isnan(start)] = 0 # convert missing starts to 0 end[np.isnan(end)] = np.iinfo(int).max # convert missing end to INTMAX return Lifetimes(sid, start.astype("i8"), end.astype("i8")) - def lifetimes(self, dates: pd.DatetimeIndex, include_start_date: bool, country_codes: list[str]): + + async def asset_lifetimes(self, assets: list[Asset], dates: pd.DatetimeIndex, include_start_date: bool): """Compute a DataFrame representing asset lifetimes for the specified date range. @@ -1436,29 +1271,8 @@ def lifetimes(self, dates: pd.DatetimeIndex, include_start_date: bool, country_c numpy.putmask ziplime.pipeline.engine.SimplePipelineEngine._compute_root_mask """ - if isinstance(country_codes, str): - raise TypeError( - "Got string {!r} instead of an iterable of strings in " - "AssetFinder.lifetimes.".format(country_codes), - ) - - # normalize to a cache-key so that we can memoize results. 
- country_codes = frozenset(country_codes) - - lifetimes = self._asset_lifetimes.get(country_codes) - if lifetimes is None: - self._asset_lifetimes[country_codes] = lifetimes = ( - self._compute_asset_lifetimes(country_codes=country_codes) - ) - - raw_dates = as_column(dates.asi8) - if include_start_date: - mask = lifetimes.start <= raw_dates - else: - mask = lifetimes.start < raw_dates - mask &= raw_dates <= lifetimes.end - - return pd.DataFrame(mask, index=dates, columns=lifetimes.sid) + lifetimes = await self._compute_asset_lifetimes(assets=frozenset(assets)) + return lifetimes # def equities_sids_for_country_code(self, country_code: str): # """Return all of the sids for a given country. diff --git a/ziplime/assets/services/asset_service.py b/ziplime/assets/services/asset_service.py index f8a7e638ff..cff326dd93 100644 --- a/ziplime/assets/services/asset_service.py +++ b/ziplime/assets/services/asset_service.py @@ -1,6 +1,7 @@ import datetime import aiocache +import pandas as pd import polars as pl from aiocache import Cache @@ -76,12 +77,45 @@ async def get_commodity_by_symbol(self, symbol: str, exchange_name: str) -> Comm return await self._asset_repository.get_commodity_by_symbol(symbol=symbol, exchange_name=exchange_name) - def get_stock_dividends(self, sid: int, trading_days: pl.Series) -> list[Dividend]: - return self._adjustments_repository.get_stock_dividends(sid=sid, + async def get_stock_dividends(self, sid: int, trading_days: pl.Series) -> list[Dividend]: + return await self._adjustments_repository.get_stock_dividends(sid=sid, trading_days=trading_days) - def get_splits(self, assets: frozenset[Asset], dt: datetime.date): - return self._adjustments_repository.get_splits(assets=assets, dt=dt) + async def get_splits(self, assets: frozenset[Asset], dt: datetime.date): + return await self._adjustments_repository.get_splits(assets=assets, dt=dt) async def get_symbols_universe(self, symbol: str) -> SymbolsUniverse | None: return await 
self._asset_repository.get_symbols_universe(symbol=symbol) + + async def lifetimes(self, dates: pd.DatetimeIndex, include_start_date: bool, country_codes: list[str]): + # normalize to a cache-key so that we can memoize results. + lifetimes = await self._asset_repository.lifetimes(dates=dates, include_start_date=include_start_date, + country_codes=country_codes) + + raw_dates = dates.view('int64') // 10**9 + if include_start_date: + mask = lifetimes.start[None, :] <= raw_dates[:, None] + else: + mask = lifetimes.start[None, :] < raw_dates[:, None] + mask &= raw_dates[:, None] <= lifetimes.end[None, :] + return pd.DataFrame(mask, index=dates, columns=lifetimes.sid) + + async def asset_lifetimes(self, assets: list[Asset], dates: pd.DatetimeIndex, include_start_date: bool): + # normalize to a cache-key so that we can memoize results. + lifetimes = await self._asset_repository.asset_lifetimes(dates=dates, include_start_date=include_start_date, + assets=assets) + + raw_dates = dates.view('int64') // 10**9 + if include_start_date: + mask = lifetimes.start[None, :] <= raw_dates[:, None] + else: + mask = lifetimes.start[None, :] < raw_dates[:, None] + mask &= raw_dates[:, None] <= lifetimes.end[None, :] + return pd.DataFrame(mask, index=dates, columns=lifetimes.sid) + + + async def retrieve_all(self, sids: list[int], default_none: bool = False): + return await self._asset_repository.retrieve_all(sids=sids, default_none=default_none) + + async def load_pricing_adjustments(self, columns, dates, assets): + return await self._adjustments_repository.load_pricing_adjustments(columns=columns, dates=dates, assets=assets) diff --git a/ziplime/core/algorithm_file.py b/ziplime/core/algorithm_file.py index 00ef3979e0..1929aaacde 100644 --- a/ziplime/core/algorithm_file.py +++ b/ziplime/core/algorithm_file.py @@ -1,12 +1,16 @@ import importlib.util +import logging import sys +import structlog + from ziplime.config.base_algorithm_config import BaseAlgorithmConfig class AlgorithmFile: - 
def __init__(self, algorithm_file: str, algorithm_config_file: str | None = None): + def __init__(self, algorithm_file: str, algorithm_config_file: str | None = None, + logger: logging.Logger = structlog.get_logger(__name__)): """ Initializes the algorithm environment by loading the specified algorithm script and its optional configuration file. The module is dynamically imported and expected functions are retrieved @@ -35,6 +39,7 @@ def __init__(self, algorithm_file: str, algorithm_config_file: str | None = None script or the base configuration class `BaseAlgorithmConfig`, loaded using the algorithm configuration file or default parameters. """ + def noop(*args, **kwargs): pass @@ -54,7 +59,7 @@ def noop(*args, **kwargs): spec.loader.exec_module(module) else: raise Exception(f"No module found: {algorithm_file}") - + self._logger = logger self.initialize = module.__dict__.get("initialize", noop) self.handle_data = module.__dict__.get("handle_data", noop) self.before_trading_start = module.__dict__.get("before_trading_start", noop) @@ -68,8 +73,13 @@ def noop(*args, **kwargs): if issubclass(obj, BaseAlgorithmConfig) and obj != BaseAlgorithmConfig: custom_config_class = obj break - if custom_config_class is None: + if algorithm_config_file is None or custom_config_class is None: custom_config_class = BaseAlgorithmConfig + if algorithm_config_file is None and custom_config_class is not None: + self._logger.warning( + "Algorithm config file is not specified but custom config class is provided. " + "Configuration file won't be loaded." 
+ ) if algorithm_config_file is not None: with open(algorithm_config_file, "r") as f: config = custom_config_class.model_validate_json(f.read()) diff --git a/ziplime/core/run_simulation.py b/ziplime/core/run_simulation.py index cdb9f8c898..41c6bd710f 100644 --- a/ziplime/core/run_simulation.py +++ b/ziplime/core/run_simulation.py @@ -1,11 +1,8 @@ import datetime -from pathlib import Path +from ziplime.gens.domain.trading_clock import TradingClock from ziplime.utils.calendar_utils import get_calendar -from ziplime.assets.domain.ordered_contracts import CHAIN_PREDICATES -from ziplime.assets.repositories.sqlalchemy_adjustments_repository import SqlAlchemyAdjustmentRepository -from ziplime.assets.repositories.sqlalchemy_asset_repository import SqlAlchemyAssetRepository from ziplime.assets.services.asset_service import AssetService from ziplime.core.algorithm_file import AlgorithmFile from ziplime.data.services.data_source import DataSource @@ -41,7 +38,8 @@ async def run_simulation( benchmark_asset_symbol: str | None = None, benchmark_returns: pl.Series | None = None, equity_commission: EquityCommissionModel | None = None, - future_commission: FutureCommissionModel | None = None + future_commission: FutureCommissionModel | None = None, + clock: TradingClock | None = None ): """ Run a trading algorithm simulation within a defined time period and trading environment. 
@@ -80,13 +78,13 @@ async def run_simulation( calendar = get_calendar(trading_calendar) algo = AlgorithmFile(algorithm_file=algorithm_file, algorithm_config_file=config_file) - - clock = SimulationClock( - trading_calendar=calendar, - start_date=start_date, - end_date=end_date, - emission_rate=emission_rate, - ) + if clock is None: + clock = SimulationClock( + trading_calendar=calendar, + start_date=start_date, + end_date=end_date, + emission_rate=emission_rate, + ) if equity_commission is None: equity_commission = PerShare( cost=DEFAULT_PER_SHARE_COST, @@ -104,7 +102,7 @@ async def run_simulation( name=default_exchange_name, country_code="US", trading_calendar=calendar, - data_bundle=market_data_source, + data_source=market_data_source, equity_slippage=FixedBasisPointsSlippage(), equity_commission=equity_commission, future_slippage=VolatilityVolumeShare( diff --git a/ziplime/data/adjustments.py b/ziplime/data/adjustments.py index 4387fd6ca2..1169a83aa2 100644 --- a/ziplime/data/adjustments.py +++ b/ziplime/data/adjustments.py @@ -46,258 +46,7 @@ EPOCH = Timestamp(0, tz='UTC') -def _get_sids_from_table(db, - tablename: str, - start_date: int, - end_date: int) -> set: - """Get the unique sids for all adjustments between start_date and end_date - from table `tablename`. 
- Parameters - ---------- - db : sqlite3.connection - tablename : str - start_date : int (seconds since epoch) - end_date : int (seconds since epoch) - - Returns - ------- - sids : set - Set of sets - """ - - cursor = db.execute( - SID_QUERIES[tablename], - (start_date, end_date), - ) - out = set() - for result in cursor.fetchall(): - out.add(result[0]) - return out - - -def _get_split_sids(db, start_date: int, end_date: int) -> set: - return _get_sids_from_table(db, 'splits', start_date, end_date) - - -def _get_merger_sids(db, start_date: int, end_date: int) -> set: - return _get_sids_from_table(db, 'mergers', start_date, end_date) - - -def _get_dividend_sids(db, start_date: int, end_date: int) -> set: - return _get_sids_from_table(db, 'dividends', start_date, end_date) - - -def _adjustments(adjustments_db, - split_sids: set, - merger_sids: set, - dividends_sids: set, - start_date: int, - end_date: int, - assets: pd.Index): - c = adjustments_db.cursor() - - splits_to_query = [str(a) for a in assets if a in split_sids] - splits_results = [] - while splits_to_query: - query_len = min(len(splits_to_query), SQLITE_MAX_VARIABLE_NUMBER) - query_assets = splits_to_query[:query_len] - t = [str(a) for a in query_assets] - statement = ADJ_QUERY_TEMPLATE.format( - 'splits', - ",".join(['?' for _ in query_assets]), - start_date, - end_date, - ) - c.execute(statement, t) - splits_to_query = splits_to_query[query_len:] - splits_results.extend(c.fetchall()) - - mergers_to_query = [str(a) for a in assets if a in merger_sids] - mergers_results = [] - while mergers_to_query: - query_len = min(len(mergers_to_query), SQLITE_MAX_VARIABLE_NUMBER) - query_assets = mergers_to_query[:query_len] - t = [str(a) for a in query_assets] - statement = ADJ_QUERY_TEMPLATE.format( - 'mergers', - ",".join(['?' 
for _ in query_assets]), - start_date, - end_date, - ) - c.execute(statement, t) - mergers_to_query = mergers_to_query[query_len:] - mergers_results.extend(c.fetchall()) - - dividends_to_query = [str(a) for a in assets if a in dividends_sids] - dividends_results = [] - while dividends_to_query: - query_len = min(len(dividends_to_query), SQLITE_MAX_VARIABLE_NUMBER) - query_assets = dividends_to_query[:query_len] - t = [str(a) for a in query_assets] - statement = ADJ_QUERY_TEMPLATE.format( - 'dividends', - ",".join(['?' for _ in query_assets]), - start_date, - end_date, - ) - c.execute(statement, t) - dividends_to_query = dividends_to_query[query_len:] - dividends_results.extend(c.fetchall()) - - return splits_results, mergers_results, dividends_results - - -def load_adjustments_from_sqlite(adjustments_db, - dates: pd.DatetimeIndex, - assets: pd.Index, - should_include_splits: bool, - should_include_mergers: bool, - should_include_dividends: bool, - adjustment_type: str): - """Load a dictionary of Adjustment objects from adjustments_db. - - Parameters - ---------- - adjustments_db : sqlite3.Connection - Connection to a sqlite3 table in the format written by - SQLiteAdjustmentWriter. - dates : pd.DatetimeIndex - Dates for which adjustments are needed. - assets : pd.Int64Index - Assets for which adjustments are needed. - should_include_splits : bool - Whether split adjustments should be included. - should_include_mergers : bool - Whether merger adjustments should be included. - should_include_dividends : bool - Whether dividend adjustments should be included. - adjustment_type : str - Whether price adjustments, volume adjustments, or both, should be - included in the output. - - Returns - ------- - adjustments : dict[str -> dict[int -> Adjustment]] - A dictionary containing price and/or volume adjustment mappings from - index to adjustment objects to apply at that index. 
- """ - - if not (adjustment_type == 'price' or - adjustment_type == 'volume' or - adjustment_type == 'all'): - raise ValueError( - "%s is not a valid adjustment type.\n" - "Valid adjustment types are 'price', 'volume', and 'all'.\n" % ( - adjustment_type, - ) - ) - - should_include_price_adjustments = bool( - adjustment_type == 'all' or adjustment_type == 'price' - ) - should_include_volume_adjustments = bool( - adjustment_type == 'all' or adjustment_type == 'volume' - ) - - if not should_include_price_adjustments: - should_include_mergers = False - should_include_dividends = False - - start_date = timedelta_to_integral_seconds(dates[0] - EPOCH) - end_date = timedelta_to_integral_seconds(dates[-1] - EPOCH) - - if should_include_splits: - split_sids = _get_split_sids( - adjustments_db, - start_date, - end_date, - ) - else: - split_sids = set() - - if should_include_mergers: - merger_sids = _get_merger_sids( - adjustments_db, - start_date, - end_date, - ) - else: - merger_sids = set() - - if should_include_dividends: - dividend_sids = _get_dividend_sids( - adjustments_db, - start_date, - end_date, - ) - else: - dividend_sids = set() - - splits, mergers, dividends = _adjustments( - adjustments_db, - split_sids, - merger_sids, - dividend_sids, - start_date, - end_date, - assets, - ) - - price_adjustments = {} - volume_adjustments = {} - result = {} - asset_ixs = {} # Cache sid lookups here. - date_ixs = {} - - _dates_seconds = \ - dates.values.astype('datetime64[s]').view(int64) - - # Pre-populate date index cache. 
- for i, dt in enumerate(_dates_seconds): - date_ixs[dt] = i - - # splits affect prices and volumes, volumes is the inverse - for sid, ratio, eff_date in splits: - if eff_date < start_date: - continue - - date_loc = _lookup_dt(date_ixs, eff_date, _dates_seconds) - - if sid not in asset_ixs: - asset_ixs[sid] = assets.get_loc(sid) - asset_ix = asset_ixs[sid] - - if should_include_price_adjustments: - price_adj = Float64Multiply(0, date_loc, asset_ix, asset_ix, ratio) - price_adjustments.setdefault(date_loc, []).append(price_adj) - - if should_include_volume_adjustments: - volume_adj = Float64Multiply( - 0, date_loc, asset_ix, asset_ix, 1.0 / ratio - ) - volume_adjustments.setdefault(date_loc, []).append(volume_adj) - - # mergers and dividends affect prices only - for sid, ratio, eff_date in chain(mergers, dividends): - if eff_date < start_date: - continue - - date_loc = _lookup_dt(date_ixs, eff_date, _dates_seconds) - - if sid not in asset_ixs: - asset_ixs[sid] = assets.get_loc(sid) - asset_ix = asset_ixs[sid] - - price_adj = Float64Multiply(0, date_loc, asset_ix, asset_ix, ratio) - price_adjustments.setdefault(date_loc, []).append(price_adj) - - if should_include_price_adjustments: - result['price'] = price_adjustments - if should_include_volume_adjustments: - result['volume'] = volume_adjustments - - return result def _lookup_dt(dt_cache: dict, diff --git a/ziplime/data/data_sources/grpc/grpc_asset_data_source.py b/ziplime/data/data_sources/grpc/grpc_asset_data_source.py deleted file mode 100644 index 6989242bb5..0000000000 --- a/ziplime/data/data_sources/grpc/grpc_asset_data_source.py +++ /dev/null @@ -1,127 +0,0 @@ -import datetime -import multiprocessing -import os -from typing import Self - -import aiocache -import grpc -import structlog - -import polars as pl -from aiocache import Cache - -from ziplime.assets.entities.asset import Asset -from ziplime.assets.entities.equity import Equity -from ziplime.assets.entities.equity_symbol_mapping import 
EquitySymbolMapping -from ziplime.assets.models.exchange_info import ExchangeInfo -from ziplime.data.data_sources.asset_data_source import AssetDataSource -from ziplime.data.data_sources.grpc.grpc_stubs.grpc.tradeapi.v1.auth import auth_service_pb2_grpc, auth_service_pb2 -from ziplime.data.data_sources.grpc.grpc_stubs.grpc.tradeapi.v1.assets import assets_service_pb2_grpc, \ - assets_service_pb2 - - -class GrpcAssetDataSource(AssetDataSource): - def __init__(self, authorization_token: str, server_url: str, - maximum_threads: int | None = None): - super().__init__() - self._logger = structlog.get_logger(__name__) - self._server_url = server_url - self._authorization_token = authorization_token - if maximum_threads is not None: - self._maximum_threads = min(multiprocessing.cpu_count() * 2, maximum_threads) - else: - self._maximum_threads = multiprocessing.cpu_count() * 2 - - @aiocache.cached(cache=Cache.MEMORY) - async def get_token(self) -> str: - credentials = grpc.ssl_channel_credentials() - async with grpc.aio.secure_channel(self._server_url, credentials) as channel: - stub = auth_service_pb2_grpc.AuthServiceStub(channel) - auth_request = auth_service_pb2.AuthRequest() - auth_request.secret = self._authorization_token - response = await stub.Auth(auth_request) - return response.token - - async def get_assets(self, **kwargs) -> list[Asset]: - request = assets_service_pb2.AssetsRequest() - token = await self.get_token() - metadata = [('authorization', token)] - credentials = grpc.ssl_channel_credentials() - - async with grpc.aio.secure_channel(self._server_url, credentials) as channel: - stub = assets_service_pb2_grpc.AssetsServiceStub(channel) - response_stream = await stub.Assets(request, metadata=metadata) - - self._logger.info(f"Got {len(response_stream.assets)} assets from GRPC asset data source.") - - asset_start_date = datetime.datetime(year=1900, month=1, day=1, tzinfo=datetime.timezone.utc) - asset_end_date = datetime.datetime(year=2099, month=1, day=1, 
tzinfo=datetime.timezone.utc) - - equities = {} - for asset in response_stream.assets: - if asset.ticker not in equities: - equities[asset.ticker] = Equity( - asset_name=asset.symbol, - symbol_mapping={ - asset.mic: EquitySymbolMapping( - symbol=asset.ticker, - exchange_name=asset.mic, - start_date=asset_start_date, - end_date=asset_end_date, - company_symbol="", - share_class_symbol="" - ) - # "LIME": EquitySymbolMapping( - # symbol=asset.symbol, - # exchange_name="LIME", - # start_date=asset_start_date, - # end_date=asset_end_date, - # company_symbol="", - # share_class_symbol="" - # ) - }, - sid=None, - start_date=asset_start_date, - end_date=asset_end_date, - auto_close_date=asset_end_date, - first_traded=asset_start_date, - mic=asset.mic - ) - else: - equities[asset.ticker].symbol_mapping[asset.mic] =EquitySymbolMapping( - symbol=asset.ticker, - exchange_name=asset.mic, - start_date=asset_start_date, - end_date=asset_end_date, - company_symbol="", - share_class_symbol="" - ) - - return list(equities.values()) - - async def get_exchanges(self, **kwargs) ->list[ExchangeInfo]: - request = assets_service_pb2.ExchangesRequest() - token = await self.get_token() - metadata = [('authorization', token)] - credentials = grpc.ssl_channel_credentials() - - async with grpc.aio.secure_channel(self._server_url, credentials) as channel: - stub = assets_service_pb2_grpc.AssetsServiceStub(channel) - response_stream = await stub.Exchanges(request, metadata=metadata) - exchanges = [ExchangeInfo(exchange=exchange.mic, canonical_name=exchange.name, country_code="US") - for exchange in response_stream.exchanges] - self._logger.info(f"Got {len(exchanges)} exchanges from GRPC asset data source.") - return exchanges - - async def get_constituents(self, index: str) -> pl.DataFrame: - assets = self._limex_client.constituents(index) - return assets - - @classmethod - def from_env(cls) -> Self: - token = os.environ.get("GRPC_TOKEN", None) - server_url = os.environ.get("GRPC_SERVER_URL") - 
maximum_threads = os.environ.get("GRPC_MAXIMUM_THREADS", None) - if token is None: - raise ValueError("Missing GRPC_TOKEN environment variable.") - return cls(server_url=server_url, authorization_token=token, maximum_threads=maximum_threads) diff --git a/ziplime/data/data_sources/grpc/grpc_data_source.py b/ziplime/data/data_sources/grpc/grpc_data_source.py deleted file mode 100644 index 1fe4e350af..0000000000 --- a/ziplime/data/data_sources/grpc/grpc_data_source.py +++ /dev/null @@ -1,238 +0,0 @@ -import asyncio -import datetime -import multiprocessing -import os -import sys -import time -from typing import Self - -import aiocache -import grpc -import structlog -from aiocache import Cache -from asyncclick import progressbar -from google.type.date_pb2 import Date - -import polars as pl - -from ziplime.data.services.data_bundle_source import DataBundleSource -# from ziplime.data.data_sources.grpc.grpc_stubs.grpc.ta import candles_pb2 -# from ziplime.data.data_sources.grpc.grpc_stubs.grpc.ta import candles_pb2_grpc -# from ziplime.data.data_sources.grpc.grpc_stubs.proto.ta import ta_pb2 -# from ziplime.data.data_sources.grpc.grpc_stubs.proto.common import securityidentifier_pb2 -# -from google.protobuf.timestamp_pb2 import Timestamp -from ziplime.data.data_sources.grpc.grpc_stubs.grpc.tradeapi.v1.auth import auth_service_pb2_grpc, auth_service_pb2 -from ziplime.data.data_sources.grpc.grpc_stubs.grpc.tradeapi.v1.marketdata import marketdata_service_pb2_grpc, \ - marketdata_service_pb2 -# from ziplime.data.data_sources.grpc.grpc_stubs.grpc.tradeapi.v1 import interval_pb2 -from google.type import interval_pb2 - - -class GrpcDataSource(DataBundleSource): - def __init__(self, authorization_token: str, server_url: str, - maximum_threads: int | None = None): - super().__init__() - self._logger = structlog.get_logger(__name__) - self._server_url = server_url - self._authorization_token = authorization_token - if maximum_threads is not None: - self._maximum_threads = 
min(multiprocessing.cpu_count() * 2, maximum_threads) - else: - self._maximum_threads = multiprocessing.cpu_count() * 2 - - @aiocache.cached(cache=Cache.MEMORY) - async def get_token(self) -> str: - credentials = grpc.ssl_channel_credentials() - async with grpc.aio.secure_channel(self._server_url, credentials) as channel: - stub = auth_service_pb2_grpc.AuthServiceStub(channel) - auth_request = auth_service_pb2.AuthRequest() - auth_request.secret = self._authorization_token - response = await stub.Auth(auth_request) - return response.token - - def _get_timeframe(self, frequency: datetime.timedelta) -> str: - if frequency <= datetime.timedelta(minutes=1): - return "TIME_FRAME_M1" - elif frequency <= datetime.timedelta(minutes=5): - return "TIME_FRAME_M5" - elif frequency <= datetime.timedelta(minutes=15): - return "TIME_FRAME_M15" - elif frequency <= datetime.timedelta(minutes=30): - return "TIME_FRAME_M30" - elif frequency <= datetime.timedelta(hours=1): - return "TIME_FRAME_H1" - elif frequency <= datetime.timedelta(hours=2): - return "TIME_FRAME_H2" - elif frequency <= datetime.timedelta(hours=4): - return "TIME_FRAME_H4" - elif frequency <= datetime.timedelta(hours=8): - return "TIME_FRAME_H8" - elif frequency <= datetime.timedelta(days=1): - return "TIME_FRAME_D" - elif frequency <= datetime.timedelta(weeks=1): - return "TIME_FRAME_W" - elif frequency <= datetime.timedelta(days=31): - return "TIME_FRAME_MN" - elif frequency <= datetime.timedelta(days=95): - return "TIME_FRAME_QR" - - raise ValueError(f"Unsupported frequency for Yahoo Finance {frequency}") - - async def fetch_historical_lime_trader_data(self, - channel: grpc.aio.Channel, - date_from: datetime.datetime, - date_to: datetime.datetime, - symbol: str, - frequency: datetime.timedelta, - ) -> tuple[pl.DataFrame, float, float]: - duration_start = time.time() - token = await self.get_token() - - stub = marketdata_service_pb2_grpc.MarketDataServiceStub(channel) - metadata = [('authorization', token)] - if 
"@" in symbol: - ticker, mic = symbol.split("@") - else: - ticker, mic = symbol, "XNGS" - timestamp_from = Timestamp() - timestamp_to = Timestamp() - timestamp_from.FromDatetime(date_from) - timestamp_to.FromDatetime(date_to) - interval = interval_pb2.Interval(start_time=timestamp_from, end_time=timestamp_to) - - bars_request = marketdata_service_pb2.BarsRequest( - timeframe=self._get_timeframe(frequency=frequency), - interval=interval) - bars_request.symbol = f"{ticker}@{mic}" - self._logger.info(f"Fetching market data for symbol {symbol} - {date_from} to {date_to}", - symbol=symbol, start_date=date_from, end_date=date_to) - total_requests_time = 0 - request_start = time.time() - response = await stub.Bars(bars_request, metadata=metadata) - duration = time.time() - request_start - total_requests_time += duration - - rows = [ - { - "date": datetime.datetime.fromtimestamp(candle.timestamp.seconds, - tz=date_from.tzinfo), - "open": float(candle.open.value), - "high": float(candle.high.value), - "low": float(candle.low.value), - "close": float(candle.close.value), - "volume": int(float(candle.volume.value)), - "exchange": mic, - "exchange_country": "US", - "price": float(candle.close.value), - "symbol": ticker, - } for candle in response.bars - ] - self._logger.info( - f"Fetched market data for symbol {symbol} - {date_from} to {date_to} in {duration:.2f}", - symbol=symbol, start_date=date_from, end_date=date_to, duration=duration) - - try: - - df = pl.DataFrame(rows, schema=[("open", pl.Float64()), ("close", pl.Float64()), - ("price", pl.Float64()), - ("high", pl.Float64()), ("low", pl.Float64()), - ("volume", pl.Float64()), - ("date", pl.Datetime(time_zone=date_from.tzinfo)), ("exchange", pl.String), - ("exchange_country", pl.String), ("symbol", pl.String) - ]) - duration_total = time.time() - duration_start - - self._logger.info( - f"Retrieved {len(rows)} candles for {symbol} in {duration_total:.2f}s. 
" - f"Total requests time: {total_requests_time:.2f}s", - total_duration=duration_total, requests_duration=total_requests_time - ) - - return df, duration_total, total_requests_time - except grpc.RpcError as e: - self._logger.exception(f"Failed to get day candles for {symbol}") - raise - - async def get_data(self, symbols: list[str], - frequency: datetime.timedelta, - date_from: datetime.datetime, - date_to: datetime.datetime, - **kwargs - ) -> pl.DataFrame: - async def fetch_historical(symbol: str, start_date: datetime.datetime, - end_date: datetime.datetime) -> tuple[pl.DataFrame | None, float, float]: - try: - credentials = grpc.ssl_channel_credentials() - async with grpc.aio.secure_channel(self._server_url, credentials) as channel: - - result, duration, requests_time = await self.fetch_historical_lime_trader_data(channel=channel, - date_from=start_date, - date_to=end_date, - symbol=symbol, - frequency=frequency) - return result, duration, requests_time - except Exception as e: - self._logger.exception( - f"Exception fetching historical data for symbol {symbol}, date_from={start_date}, date_to={end_date}. Skipping." 
- ) - return None, 0, 0 - - total_days = (date_to - date_from).days - final = pl.DataFrame() - - with progressbar(length=len(symbols) * total_days, label="Downloading historical data from Lime Trader", - file=sys.stdout) as pbar: - - if frequency >= datetime.timedelta(days=1): - maximum_batch = datetime.timedelta(days=7200) - elif frequency >= datetime.timedelta(hours=1): - maximum_batch = datetime.timedelta(days=365) - elif frequency >= datetime.timedelta(minutes=1): - maximum_batch = datetime.timedelta(days=180) - elif frequency >= datetime.timedelta(seconds=1): - maximum_batch = datetime.timedelta(days=30) - - tasks = [] - batch_start_date = date_from - while batch_start_date < date_to: - - batch_end_date = batch_start_date + maximum_batch - if batch_end_date > date_to: - batch_end_date = date_to - - tasks.extend( - fetch_historical( - symbol=symbol, start_date=batch_start_date, end_date=batch_end_date - ) for symbol in symbols - ) - batch_start_date = batch_end_date - - total_duration = 0 - total_requests_duration = 0 - res = await asyncio.gather(*tasks) - - for item in res: - df, duration, requests_duration = item - total_duration += duration - total_requests_duration += requests_duration - pbar.update(total_days) - if df is None: - continue - final = pl.concat([final, df]) - self._logger.info( - f"Retrieved {len(final)} candles for {symbols} in {total_duration:.2f}s. 
" - f"Total requests time: {total_requests_duration:.2f}s", - total_duration=total_duration, requests_duration=total_requests_duration - ) - - return final - - @classmethod - def from_env(cls) -> Self: - token = os.environ.get("GRPC_TOKEN", None) - server_url = os.environ.get("GRPC_SERVER_URL") - maximum_threads = os.environ.get("GRPC_MAXIMUM_THREADS", None) - if token is None: - raise ValueError("Missing GRPC_TOKEN environment variable.") - return cls(server_url=server_url, authorization_token=token, maximum_threads=maximum_threads) diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/accounts/accounts_service_pb2.py b/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/accounts/accounts_service_pb2.py deleted file mode 100644 index af72333bc2..0000000000 --- a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/accounts/accounts_service_pb2.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# NO CHECKED-IN PROTOBUF GENCODE -# source: grpc/tradeapi/v1/accounts/accounts_service.proto -# Protobuf Python Version: 6.31.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 6, - 31, - 1, - '', - 'grpc/tradeapi/v1/accounts/accounts_service.proto' -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.type import decimal_pb2 as google_dot_type_dot_decimal__pb2 -from google.type import interval_pb2 as google_dot_type_dot_interval__pb2 -from google.type import money_pb2 as google_dot_type_dot_money__pb2 -from ziplime.data.data_sources.grpc.grpc_stubs.grpc.tradeapi.v1 import side_pb2 as grpc_dot_tradeapi_dot_v1_dot_side__pb2 -from ziplime.data.data_sources.grpc.grpc_stubs.grpc.tradeapi.v1 import trade_pb2 as grpc_dot_tradeapi_dot_v1_dot_trade__pb2 - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n0grpc/tradeapi/v1/accounts/accounts_service.proto\x12\x19grpc.tradeapi.v1.accounts\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x19google/type/decimal.proto\x1a\x1agoogle/type/interval.proto\x1a\x17google/type/money.proto\x1a\x1bgrpc/tradeapi/v1/side.proto\x1a\x1cgrpc/tradeapi/v1/trade.proto\"\'\n\x11GetAccountRequest\x12\x12\n\naccount_id\x18\x01 \x01(\t\"\xb1\x03\n\x12GetAccountResponse\x12\x12\n\naccount_id\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\x12$\n\x06\x65quity\x18\x04 
\x01(\x0b\x32\x14.google.type.Decimal\x12/\n\x11unrealized_profit\x18\x05 \x01(\x0b\x32\x14.google.type.Decimal\x12\x36\n\tpositions\x18\x06 \x03(\x0b\x32#.grpc.tradeapi.v1.accounts.Position\x12 \n\x04\x63\x61sh\x18\x07 \x03(\x0b\x32\x12.google.type.Money\x12\x35\n\x0cportfolio_mc\x18\x08 \x01(\x0b\x32\x1d.grpc.tradeapi.v1.accounts.MCH\x00\x12\x37\n\rportfolio_mct\x18\t \x01(\x0b\x32\x1e.grpc.tradeapi.v1.accounts.MCTH\x00\x12;\n\x0fportfolio_forts\x18\n \x01(\x0b\x32 .grpc.tradeapi.v1.accounts.FORTSH\x00\x42\x0b\n\tportfolio\"\x92\x01\n\x02MC\x12,\n\x0e\x61vailable_cash\x18\x01 \x01(\x0b\x32\x14.google.type.Decimal\x12,\n\x0einitial_margin\x18\x02 \x01(\x0b\x32\x14.google.type.Decimal\x12\x30\n\x12maintenance_margin\x18\x03 \x01(\x0b\x32\x14.google.type.Decimal\"\x05\n\x03MCT\"c\n\x05\x46ORTS\x12,\n\x0e\x61vailable_cash\x18\x01 \x01(\x0b\x32\x14.google.type.Decimal\x12,\n\x0emoney_reserved\x18\x02 \x01(\x0b\x32\x14.google.type.Decimal\"[\n\rTradesRequest\x12\x12\n\naccount_id\x18\x01 \x01(\t\x12\r\n\x05limit\x18\x02 \x01(\x05\x12\'\n\x08interval\x18\x03 \x01(\x0b\x32\x15.google.type.Interval\"@\n\x0eTradesResponse\x12.\n\x06trades\x18\x01 \x03(\x0b\x32\x1e.grpc.tradeapi.v1.AccountTrade\"a\n\x13TransactionsRequest\x12\x12\n\naccount_id\x18\x01 \x01(\t\x12\r\n\x05limit\x18\x02 \x01(\x05\x12\'\n\x08interval\x18\x03 \x01(\x0b\x32\x15.google.type.Interval\"T\n\x14TransactionsResponse\x12<\n\x0ctransactions\x18\x01 \x03(\x0b\x32&.grpc.tradeapi.v1.accounts.Transaction\"\xa5\x02\n\x08Position\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12&\n\x08quantity\x18\x02 \x01(\x0b\x32\x14.google.type.Decimal\x12+\n\raverage_price\x18\x03 \x01(\x0b\x32\x14.google.type.Decimal\x12+\n\rcurrent_price\x18\x04 \x01(\x0b\x32\x14.google.type.Decimal\x12\x30\n\x12maintenance_margin\x18\x05 \x01(\x0b\x32\x14.google.type.Decimal\x12\'\n\tdaily_pnl\x18\x06 \x01(\x0b\x32\x14.google.type.Decimal\x12,\n\x0eunrealized_pnl\x18\x07 
\x01(\x0b\x32\x14.google.type.Decimal\"\x85\x05\n\x0bTransaction\x12\n\n\x02id\x18\x01 \x01(\t\x12\x14\n\x08\x63\x61tegory\x18\x02 \x01(\tB\x02\x18\x01\x12-\n\ttimestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0e\n\x06symbol\x18\x05 \x01(\t\x12\"\n\x06\x63hange\x18\x06 \x01(\x0b\x32\x12.google.type.Money\x12;\n\x05trade\x18\x07 \x01(\x0b\x32,.grpc.tradeapi.v1.accounts.Transaction.Trade\x12X\n\x14transaction_category\x18\x08 \x01(\x0e\x32:.grpc.tradeapi.v1.accounts.Transaction.TransactionCategory\x12\x18\n\x10transaction_name\x18\t \x01(\t\x1a\x80\x01\n\x05Trade\x12\"\n\x04size\x18\x01 \x01(\x0b\x32\x14.google.type.Decimal\x12#\n\x05price\x18\x02 \x01(\x0b\x32\x14.google.type.Decimal\x12.\n\x10\x61\x63\x63rued_interest\x18\x03 \x01(\x0b\x32\x14.google.type.Decimal\"\xbc\x01\n\x13TransactionCategory\x12\n\n\x06OTHERS\x10\x00\x12\x0b\n\x07\x44\x45POSIT\x10\x01\x12\x0c\n\x08WITHDRAW\x10\x02\x12\n\n\x06INCOME\x10\x05\x12\x0e\n\nCOMMISSION\x10\x07\x12\x07\n\x03TAX\x10\x08\x12\x0f\n\x0bINHERITANCE\x10\t\x12\x0c\n\x08TRANSFER\x10\x0b\x12\x18\n\x14\x43ONTRACT_TERMINATION\x10\x0c\x12\x0c\n\x08OUTCOMES\x10\r\x12\x08\n\x04\x46INE\x10\x0f\x12\x08\n\x04LOAN\x10\x13\x32\xcc\x03\n\x0f\x41\x63\x63ountsService\x12\x8c\x01\n\nGetAccount\x12,.grpc.tradeapi.v1.accounts.GetAccountRequest\x1a-.grpc.tradeapi.v1.accounts.GetAccountResponse\"!\x82\xd3\xe4\x93\x02\x1b\x12\x19/v1/accounts/{account_id}\x12\x87\x01\n\x06Trades\x12(.grpc.tradeapi.v1.accounts.TradesRequest\x1a).grpc.tradeapi.v1.accounts.TradesResponse\"(\x82\xd3\xe4\x93\x02\"\x12 /v1/accounts/{account_id}/trades\x12\x9f\x01\n\x0cTransactions\x12..grpc.tradeapi.v1.accounts.TransactionsRequest\x1a/.grpc.tradeapi.v1.accounts.TransactionsResponse\".\x82\xd3\xe4\x93\x02(\x12&/v1/accounts/{account_id}/transactionsB*P\x01Z&trade_api/v1/accounts/accounts_serviceb\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 
'grpc.tradeapi.v1.accounts.accounts_service_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'P\001Z&trade_api/v1/accounts/accounts_service' - _globals['_TRANSACTION'].fields_by_name['category']._loaded_options = None - _globals['_TRANSACTION'].fields_by_name['category']._serialized_options = b'\030\001' - _globals['_ACCOUNTSSERVICE'].methods_by_name['GetAccount']._loaded_options = None - _globals['_ACCOUNTSSERVICE'].methods_by_name['GetAccount']._serialized_options = b'\202\323\344\223\002\033\022\031/v1/accounts/{account_id}' - _globals['_ACCOUNTSSERVICE'].methods_by_name['Trades']._loaded_options = None - _globals['_ACCOUNTSSERVICE'].methods_by_name['Trades']._serialized_options = b'\202\323\344\223\002\"\022 /v1/accounts/{account_id}/trades' - _globals['_ACCOUNTSSERVICE'].methods_by_name['Transactions']._loaded_options = None - _globals['_ACCOUNTSSERVICE'].methods_by_name['Transactions']._serialized_options = b'\202\323\344\223\002(\022&/v1/accounts/{account_id}/transactions' - _globals['_GETACCOUNTREQUEST']._serialized_start=281 - _globals['_GETACCOUNTREQUEST']._serialized_end=320 - _globals['_GETACCOUNTRESPONSE']._serialized_start=323 - _globals['_GETACCOUNTRESPONSE']._serialized_end=756 - _globals['_MC']._serialized_start=759 - _globals['_MC']._serialized_end=905 - _globals['_MCT']._serialized_start=907 - _globals['_MCT']._serialized_end=912 - _globals['_FORTS']._serialized_start=914 - _globals['_FORTS']._serialized_end=1013 - _globals['_TRADESREQUEST']._serialized_start=1015 - _globals['_TRADESREQUEST']._serialized_end=1106 - _globals['_TRADESRESPONSE']._serialized_start=1108 - _globals['_TRADESRESPONSE']._serialized_end=1172 - _globals['_TRANSACTIONSREQUEST']._serialized_start=1174 - _globals['_TRANSACTIONSREQUEST']._serialized_end=1271 - _globals['_TRANSACTIONSRESPONSE']._serialized_start=1273 - _globals['_TRANSACTIONSRESPONSE']._serialized_end=1357 - 
_globals['_POSITION']._serialized_start=1360 - _globals['_POSITION']._serialized_end=1653 - _globals['_TRANSACTION']._serialized_start=1656 - _globals['_TRANSACTION']._serialized_end=2301 - _globals['_TRANSACTION_TRADE']._serialized_start=1982 - _globals['_TRANSACTION_TRADE']._serialized_end=2110 - _globals['_TRANSACTION_TRANSACTIONCATEGORY']._serialized_start=2113 - _globals['_TRANSACTION_TRANSACTIONCATEGORY']._serialized_end=2301 - _globals['_ACCOUNTSSERVICE']._serialized_start=2304 - _globals['_ACCOUNTSSERVICE']._serialized_end=2764 -# @@protoc_insertion_point(module_scope) diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/accounts/accounts_service_pb2_grpc.py b/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/accounts/accounts_service_pb2_grpc.py deleted file mode 100644 index 8cc119e57e..0000000000 --- a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/accounts/accounts_service_pb2_grpc.py +++ /dev/null @@ -1,169 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc -import warnings - -from ziplime.data.data_sources.grpc.grpc_stubs.grpc.tradeapi.v1.accounts import accounts_service_pb2 as grpc_dot_tradeapi_dot_v1_dot_accounts_dot_accounts__service__pb2 - -GRPC_GENERATED_VERSION = '1.74.0' -GRPC_VERSION = grpc.__version__ -_version_not_supported = False - -try: - from grpc._utilities import first_version_is_lower - _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) -except ImportError: - _version_not_supported = True - -if _version_not_supported: - raise RuntimeError( - f'The grpc package installed is at version {GRPC_VERSION},' - + f' but the generated code in grpc/tradeapi/v1/accounts/accounts_service_pb2_grpc.py depends on' - + f' grpcio>={GRPC_GENERATED_VERSION}.' 
- + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' - + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' - ) - - -class AccountsServiceStub(object): - def __init__(self, channel): - self.GetAccount = channel.unary_unary( - '/grpc.tradeapi.v1.accounts.AccountsService/GetAccount', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_accounts_dot_accounts__service__pb2.GetAccountRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_accounts_dot_accounts__service__pb2.GetAccountResponse.FromString, - _registered_method=True) - self.Trades = channel.unary_unary( - '/grpc.tradeapi.v1.accounts.AccountsService/Trades', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_accounts_dot_accounts__service__pb2.TradesRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_accounts_dot_accounts__service__pb2.TradesResponse.FromString, - _registered_method=True) - self.Transactions = channel.unary_unary( - '/grpc.tradeapi.v1.accounts.AccountsService/Transactions', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_accounts_dot_accounts__service__pb2.TransactionsRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_accounts_dot_accounts__service__pb2.TransactionsResponse.FromString, - _registered_method=True) - - -class AccountsServiceServicer(object): - def GetAccount(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def Trades(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def Transactions(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def 
add_AccountsServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - 'GetAccount': grpc.unary_unary_rpc_method_handler( - servicer.GetAccount, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_accounts_dot_accounts__service__pb2.GetAccountRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_accounts_dot_accounts__service__pb2.GetAccountResponse.SerializeToString, - ), - 'Trades': grpc.unary_unary_rpc_method_handler( - servicer.Trades, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_accounts_dot_accounts__service__pb2.TradesRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_accounts_dot_accounts__service__pb2.TradesResponse.SerializeToString, - ), - 'Transactions': grpc.unary_unary_rpc_method_handler( - servicer.Transactions, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_accounts_dot_accounts__service__pb2.TransactionsRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_accounts_dot_accounts__service__pb2.TransactionsResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'grpc.tradeapi.v1.accounts.AccountsService', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - server.add_registered_method_handlers('grpc.tradeapi.v1.accounts.AccountsService', rpc_method_handlers) - - - # This class is part of an EXPERIMENTAL API. 
-class AccountsService(object): - @staticmethod - def GetAccount(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/grpc.tradeapi.v1.accounts.AccountsService/GetAccount', - grpc_dot_tradeapi_dot_v1_dot_accounts_dot_accounts__service__pb2.GetAccountRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_accounts_dot_accounts__service__pb2.GetAccountResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def Trades(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/grpc.tradeapi.v1.accounts.AccountsService/Trades', - grpc_dot_tradeapi_dot_v1_dot_accounts_dot_accounts__service__pb2.TradesRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_accounts_dot_accounts__service__pb2.TradesResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def Transactions(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/grpc.tradeapi.v1.accounts.AccountsService/Transactions', - grpc_dot_tradeapi_dot_v1_dot_accounts_dot_accounts__service__pb2.TransactionsRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_accounts_dot_accounts__service__pb2.TransactionsResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, 
- compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/assets/__init__.py b/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/assets/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/assets/assets_service_pb2.py b/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/assets/assets_service_pb2.py deleted file mode 100644 index e8ff1280af..0000000000 --- a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/assets/assets_service_pb2.py +++ /dev/null @@ -1,107 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# NO CHECKED-IN PROTOBUF GENCODE -# source: grpc/tradeapi/v1/assets/assets_service.proto -# Protobuf Python Version: 6.31.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 6, - 31, - 1, - '', - 'grpc/tradeapi/v1/assets/assets_service.proto' -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.type import date_pb2 as google_dot_type_dot_date__pb2 -from google.type import decimal_pb2 as google_dot_type_dot_decimal__pb2 -from google.type import interval_pb2 as google_dot_type_dot_interval__pb2 -from google.type import money_pb2 as google_dot_type_dot_money__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = 
_descriptor_pool.Default().AddSerializedFile(b'\n,grpc/tradeapi/v1/assets/assets_service.proto\x12\x17grpc.tradeapi.v1.assets\x1a\x1cgoogle/api/annotations.proto\x1a\x16google/type/date.proto\x1a\x19google/type/decimal.proto\x1a\x1agoogle/type/interval.proto\x1a\x17google/type/money.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x12\n\x10\x45xchangesRequest\"I\n\x11\x45xchangesResponse\x12\x34\n\texchanges\x18\x01 \x03(\x0b\x32!.grpc.tradeapi.v1.assets.Exchange\"\x0f\n\rAssetsRequest\"@\n\x0e\x41ssetsResponse\x12.\n\x06\x61ssets\x18\x01 \x03(\x0b\x32\x1e.grpc.tradeapi.v1.assets.Asset\"5\n\x0fGetAssetRequest\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12\x12\n\naccount_id\x18\x02 \x01(\t\"\xec\x01\n\x10GetAssetResponse\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\t\x12\x0e\n\x06ticker\x18\x03 \x01(\t\x12\x0b\n\x03mic\x18\x04 \x01(\t\x12\x0c\n\x04isin\x18\x05 \x01(\t\x12\x0c\n\x04type\x18\x06 \x01(\t\x12\x0c\n\x04name\x18\x07 \x01(\t\x12\x10\n\x08\x64\x65\x63imals\x18\n \x01(\x05\x12\x10\n\x08min_step\x18\x0b \x01(\x03\x12&\n\x08lot_size\x18\t \x01(\x0b\x32\x14.google.type.Decimal\x12*\n\x0f\x65xpiration_date\x18\x0c \x01(\x0b\x32\x11.google.type.Date\";\n\x15GetAssetParamsRequest\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12\x12\n\naccount_id\x18\x02 \x01(\t\"\xde\x03\n\x16GetAssetParamsResponse\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12\x12\n\naccount_id\x18\x02 \x01(\t\x12\x11\n\ttradeable\x18\x03 \x01(\x08\x12\x33\n\x08longable\x18\x04 \x01(\x0b\x32!.grpc.tradeapi.v1.assets.Longable\x12\x35\n\tshortable\x18\x05 \x01(\x0b\x32\".grpc.tradeapi.v1.assets.Shortable\x12,\n\x0elong_risk_rate\x18\x06 \x01(\x0b\x32\x14.google.type.Decimal\x12/\n\x0flong_collateral\x18\x07 \x01(\x0b\x32\x12.google.type.MoneyB\x02\x18\x01\x12-\n\x0fshort_risk_rate\x18\x08 \x01(\x0b\x32\x14.google.type.Decimal\x12\x30\n\x10short_collateral\x18\t \x01(\x0b\x32\x12.google.type.MoneyB\x02\x18\x01\x12/\n\x13long_initial_margin\x18\n 
\x01(\x0b\x32\x12.google.type.Money\x12\x30\n\x14short_initial_margin\x18\x0b \x01(\x0b\x32\x12.google.type.Money\"0\n\x13OptionsChainRequest\x12\x19\n\x11underlying_symbol\x18\x01 \x01(\t\"X\n\x14OptionsChainResponse\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12\x30\n\x07options\x18\x02 \x03(\x0b\x32\x1f.grpc.tradeapi.v1.assets.Option\"!\n\x0fScheduleRequest\x12\x0e\n\x06symbol\x18\x01 \x01(\t\"\xab\x01\n\x10ScheduleResponse\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12\x44\n\x08sessions\x18\x02 \x03(\x0b\x32\x32.grpc.tradeapi.v1.assets.ScheduleResponse.Sessions\x1a\x41\n\x08Sessions\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\'\n\x08interval\x18\x02 \x01(\x0b\x32\x15.google.type.Interval\"\x0e\n\x0c\x43lockRequest\">\n\rClockResponse\x12-\n\ttimestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"%\n\x08\x45xchange\x12\x0b\n\x03mic\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"j\n\x05\x41sset\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\t\x12\x0e\n\x06ticker\x18\x03 \x01(\t\x12\x0b\n\x03mic\x18\x04 \x01(\t\x12\x0c\n\x04isin\x18\x05 \x01(\t\x12\x0c\n\x04type\x18\x06 \x01(\t\x12\x0c\n\x04name\x18\x07 \x01(\t\"\xbc\x03\n\x06Option\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12\x32\n\x04type\x18\x02 \x01(\x0e\x32$.grpc.tradeapi.v1.assets.Option.Type\x12+\n\rcontract_size\x18\x04 \x01(\x0b\x32\x14.google.type.Decimal\x12*\n\x0ftrade_first_day\x18\x05 \x01(\x0b\x32\x11.google.type.Date\x12)\n\x0etrade_last_day\x18\x06 \x01(\x0b\x32\x11.google.type.Date\x12$\n\x06strike\x18\x07 \x01(\x0b\x32\x14.google.type.Decimal\x12(\n\nmultiplier\x18\t \x01(\x0b\x32\x14.google.type.Decimal\x12/\n\x14\x65xpiration_first_day\x18\n \x01(\x0b\x32\x11.google.type.Date\x12.\n\x13\x65xpiration_last_day\x18\x0b \x01(\x0b\x32\x11.google.type.Date\"9\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\r\n\tTYPE_CALL\x10\x01\x12\x0c\n\x08TYPE_PUT\x10\x02\"\x9e\x01\n\x08Longable\x12\x37\n\x05value\x18\x01 
\x01(\x0e\x32(.grpc.tradeapi.v1.assets.Longable.Status\x12\x13\n\x0bhalted_days\x18\x02 \x01(\x05\"D\n\x06Status\x12\x11\n\rNOT_AVAILABLE\x10\x00\x12\r\n\tAVAILABLE\x10\x01\x12\x18\n\x14\x41\x43\x43OUNT_NOT_APPROVED\x10\x02\"\xc1\x01\n\tShortable\x12\x38\n\x05value\x18\x01 \x01(\x0e\x32).grpc.tradeapi.v1.assets.Shortable.Status\x12\x13\n\x0bhalted_days\x18\x02 \x01(\x05\"e\n\x06Status\x12\x11\n\rNOT_AVAILABLE\x10\x00\x12\r\n\tAVAILABLE\x10\x01\x12\x07\n\x03HTB\x10\x02\x12\x18\n\x14\x41\x43\x43OUNT_NOT_APPROVED\x10\x03\x12\x16\n\x12\x41VAILABLE_STRATEGY\x10\x04\x32\xa7\x07\n\rAssetsService\x12y\n\tExchanges\x12).grpc.tradeapi.v1.assets.ExchangesRequest\x1a*.grpc.tradeapi.v1.assets.ExchangesResponse\"\x15\x82\xd3\xe4\x93\x02\x0f\x12\r/v1/exchanges\x12m\n\x06\x41ssets\x12&.grpc.tradeapi.v1.assets.AssetsRequest\x1a\'.grpc.tradeapi.v1.assets.AssetsResponse\"\x12\x82\xd3\xe4\x93\x02\x0c\x12\n/v1/assets\x12|\n\x08GetAsset\x12(.grpc.tradeapi.v1.assets.GetAssetRequest\x1a).grpc.tradeapi.v1.assets.GetAssetResponse\"\x1b\x82\xd3\xe4\x93\x02\x15\x12\x13/v1/assets/{symbol}\x12\x95\x01\n\x0eGetAssetParams\x12..grpc.tradeapi.v1.assets.GetAssetParamsRequest\x1a/.grpc.tradeapi.v1.assets.GetAssetParamsResponse\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/v1/assets/{symbol}/params\x12\x9b\x01\n\x0cOptionsChain\x12,.grpc.tradeapi.v1.assets.OptionsChainRequest\x1a-.grpc.tradeapi.v1.assets.OptionsChainResponse\".\x82\xd3\xe4\x93\x02(\x12&/v1/assets/{underlying_symbol}/options\x12\x85\x01\n\x08Schedule\x12(.grpc.tradeapi.v1.assets.ScheduleRequest\x1a).grpc.tradeapi.v1.assets.ScheduleResponse\"$\x82\xd3\xe4\x93\x02\x1e\x12\x1c/v1/assets/{symbol}/schedule\x12p\n\x05\x43lock\x12%.grpc.tradeapi.v1.assets.ClockRequest\x1a&.grpc.tradeapi.v1.assets.ClockResponse\"\x18\x82\xd3\xe4\x93\x02\x12\x12\x10/v1/assets/clockB&P\x01Z\"trade_api/v1/assets/assets_serviceb\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) 
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'grpc.tradeapi.v1.assets.assets_service_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'P\001Z\"trade_api/v1/assets/assets_service' - _globals['_GETASSETPARAMSRESPONSE'].fields_by_name['long_collateral']._loaded_options = None - _globals['_GETASSETPARAMSRESPONSE'].fields_by_name['long_collateral']._serialized_options = b'\030\001' - _globals['_GETASSETPARAMSRESPONSE'].fields_by_name['short_collateral']._loaded_options = None - _globals['_GETASSETPARAMSRESPONSE'].fields_by_name['short_collateral']._serialized_options = b'\030\001' - _globals['_ASSETSSERVICE'].methods_by_name['Exchanges']._loaded_options = None - _globals['_ASSETSSERVICE'].methods_by_name['Exchanges']._serialized_options = b'\202\323\344\223\002\017\022\r/v1/exchanges' - _globals['_ASSETSSERVICE'].methods_by_name['Assets']._loaded_options = None - _globals['_ASSETSSERVICE'].methods_by_name['Assets']._serialized_options = b'\202\323\344\223\002\014\022\n/v1/assets' - _globals['_ASSETSSERVICE'].methods_by_name['GetAsset']._loaded_options = None - _globals['_ASSETSSERVICE'].methods_by_name['GetAsset']._serialized_options = b'\202\323\344\223\002\025\022\023/v1/assets/{symbol}' - _globals['_ASSETSSERVICE'].methods_by_name['GetAssetParams']._loaded_options = None - _globals['_ASSETSSERVICE'].methods_by_name['GetAssetParams']._serialized_options = b'\202\323\344\223\002\034\022\032/v1/assets/{symbol}/params' - _globals['_ASSETSSERVICE'].methods_by_name['OptionsChain']._loaded_options = None - _globals['_ASSETSSERVICE'].methods_by_name['OptionsChain']._serialized_options = b'\202\323\344\223\002(\022&/v1/assets/{underlying_symbol}/options' - _globals['_ASSETSSERVICE'].methods_by_name['Schedule']._loaded_options = None - _globals['_ASSETSSERVICE'].methods_by_name['Schedule']._serialized_options = 
b'\202\323\344\223\002\036\022\034/v1/assets/{symbol}/schedule' - _globals['_ASSETSSERVICE'].methods_by_name['Clock']._loaded_options = None - _globals['_ASSETSSERVICE'].methods_by_name['Clock']._serialized_options = b'\202\323\344\223\002\022\022\020/v1/assets/clock' - _globals['_EXCHANGESREQUEST']._serialized_start=240 - _globals['_EXCHANGESREQUEST']._serialized_end=258 - _globals['_EXCHANGESRESPONSE']._serialized_start=260 - _globals['_EXCHANGESRESPONSE']._serialized_end=333 - _globals['_ASSETSREQUEST']._serialized_start=335 - _globals['_ASSETSREQUEST']._serialized_end=350 - _globals['_ASSETSRESPONSE']._serialized_start=352 - _globals['_ASSETSRESPONSE']._serialized_end=416 - _globals['_GETASSETREQUEST']._serialized_start=418 - _globals['_GETASSETREQUEST']._serialized_end=471 - _globals['_GETASSETRESPONSE']._serialized_start=474 - _globals['_GETASSETRESPONSE']._serialized_end=710 - _globals['_GETASSETPARAMSREQUEST']._serialized_start=712 - _globals['_GETASSETPARAMSREQUEST']._serialized_end=771 - _globals['_GETASSETPARAMSRESPONSE']._serialized_start=774 - _globals['_GETASSETPARAMSRESPONSE']._serialized_end=1252 - _globals['_OPTIONSCHAINREQUEST']._serialized_start=1254 - _globals['_OPTIONSCHAINREQUEST']._serialized_end=1302 - _globals['_OPTIONSCHAINRESPONSE']._serialized_start=1304 - _globals['_OPTIONSCHAINRESPONSE']._serialized_end=1392 - _globals['_SCHEDULEREQUEST']._serialized_start=1394 - _globals['_SCHEDULEREQUEST']._serialized_end=1427 - _globals['_SCHEDULERESPONSE']._serialized_start=1430 - _globals['_SCHEDULERESPONSE']._serialized_end=1601 - _globals['_SCHEDULERESPONSE_SESSIONS']._serialized_start=1536 - _globals['_SCHEDULERESPONSE_SESSIONS']._serialized_end=1601 - _globals['_CLOCKREQUEST']._serialized_start=1603 - _globals['_CLOCKREQUEST']._serialized_end=1617 - _globals['_CLOCKRESPONSE']._serialized_start=1619 - _globals['_CLOCKRESPONSE']._serialized_end=1681 - _globals['_EXCHANGE']._serialized_start=1683 - _globals['_EXCHANGE']._serialized_end=1720 - 
_globals['_ASSET']._serialized_start=1722 - _globals['_ASSET']._serialized_end=1828 - _globals['_OPTION']._serialized_start=1831 - _globals['_OPTION']._serialized_end=2275 - _globals['_OPTION_TYPE']._serialized_start=2218 - _globals['_OPTION_TYPE']._serialized_end=2275 - _globals['_LONGABLE']._serialized_start=2278 - _globals['_LONGABLE']._serialized_end=2436 - _globals['_LONGABLE_STATUS']._serialized_start=2368 - _globals['_LONGABLE_STATUS']._serialized_end=2436 - _globals['_SHORTABLE']._serialized_start=2439 - _globals['_SHORTABLE']._serialized_end=2632 - _globals['_SHORTABLE_STATUS']._serialized_start=2531 - _globals['_SHORTABLE_STATUS']._serialized_end=2632 - _globals['_ASSETSSERVICE']._serialized_start=2635 - _globals['_ASSETSSERVICE']._serialized_end=3570 -# @@protoc_insertion_point(module_scope) diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/assets/assets_service_pb2_grpc.py b/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/assets/assets_service_pb2_grpc.py deleted file mode 100644 index b3303aed23..0000000000 --- a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/assets/assets_service_pb2_grpc.py +++ /dev/null @@ -1,337 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
-"""Client and server classes corresponding to protobuf-defined services.""" -import grpc -import warnings - -from ziplime.data.data_sources.grpc.grpc_stubs.grpc.tradeapi.v1.assets import assets_service_pb2 as grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2 - -GRPC_GENERATED_VERSION = '1.74.0' -GRPC_VERSION = grpc.__version__ -_version_not_supported = False - -try: - from grpc._utilities import first_version_is_lower - _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) -except ImportError: - _version_not_supported = True - -if _version_not_supported: - raise RuntimeError( - f'The grpc package installed is at version {GRPC_VERSION},' - + f' but the generated code in grpc/tradeapi/v1/assets/assets_service_pb2_grpc.py depends on' - + f' grpcio>={GRPC_GENERATED_VERSION}.' - + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' - + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' - ) - - -class AssetsServiceStub(object): - def __init__(self, channel): - self.Exchanges = channel.unary_unary( - '/grpc.tradeapi.v1.assets.AssetsService/Exchanges', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.ExchangesRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.ExchangesResponse.FromString, - _registered_method=True) - self.Assets = channel.unary_unary( - '/grpc.tradeapi.v1.assets.AssetsService/Assets', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.AssetsRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.AssetsResponse.FromString, - _registered_method=True) - self.GetAsset = channel.unary_unary( - '/grpc.tradeapi.v1.assets.AssetsService/GetAsset', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.GetAssetRequest.SerializeToString, - 
response_deserializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.GetAssetResponse.FromString, - _registered_method=True) - self.GetAssetParams = channel.unary_unary( - '/grpc.tradeapi.v1.assets.AssetsService/GetAssetParams', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.GetAssetParamsRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.GetAssetParamsResponse.FromString, - _registered_method=True) - self.OptionsChain = channel.unary_unary( - '/grpc.tradeapi.v1.assets.AssetsService/OptionsChain', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.OptionsChainRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.OptionsChainResponse.FromString, - _registered_method=True) - self.Schedule = channel.unary_unary( - '/grpc.tradeapi.v1.assets.AssetsService/Schedule', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.ScheduleRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.ScheduleResponse.FromString, - _registered_method=True) - self.Clock = channel.unary_unary( - '/grpc.tradeapi.v1.assets.AssetsService/Clock', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.ClockRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.ClockResponse.FromString, - _registered_method=True) - - -class AssetsServiceServicer(object): - def Exchanges(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def Assets(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - 
- def GetAsset(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def GetAssetParams(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def OptionsChain(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def Schedule(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def Clock(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_AssetsServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - 'Exchanges': grpc.unary_unary_rpc_method_handler( - servicer.Exchanges, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.ExchangesRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.ExchangesResponse.SerializeToString, - ), - 'Assets': grpc.unary_unary_rpc_method_handler( - servicer.Assets, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.AssetsRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.AssetsResponse.SerializeToString, - ), - 'GetAsset': grpc.unary_unary_rpc_method_handler( - servicer.GetAsset, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.GetAssetRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.GetAssetResponse.SerializeToString, - ), - 
'GetAssetParams': grpc.unary_unary_rpc_method_handler( - servicer.GetAssetParams, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.GetAssetParamsRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.GetAssetParamsResponse.SerializeToString, - ), - 'OptionsChain': grpc.unary_unary_rpc_method_handler( - servicer.OptionsChain, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.OptionsChainRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.OptionsChainResponse.SerializeToString, - ), - 'Schedule': grpc.unary_unary_rpc_method_handler( - servicer.Schedule, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.ScheduleRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.ScheduleResponse.SerializeToString, - ), - 'Clock': grpc.unary_unary_rpc_method_handler( - servicer.Clock, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.ClockRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.ClockResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'grpc.tradeapi.v1.assets.AssetsService', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - server.add_registered_method_handlers('grpc.tradeapi.v1.assets.AssetsService', rpc_method_handlers) - - - # This class is part of an EXPERIMENTAL API. 
-class AssetsService(object): - @staticmethod - def Exchanges(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/grpc.tradeapi.v1.assets.AssetsService/Exchanges', - grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.ExchangesRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.ExchangesResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def Assets(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/grpc.tradeapi.v1.assets.AssetsService/Assets', - grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.AssetsRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.AssetsResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def GetAsset(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/grpc.tradeapi.v1.assets.AssetsService/GetAsset', - grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.GetAssetRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.GetAssetResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - 
_registered_method=True) - - @staticmethod - def GetAssetParams(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/grpc.tradeapi.v1.assets.AssetsService/GetAssetParams', - grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.GetAssetParamsRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.GetAssetParamsResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def OptionsChain(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/grpc.tradeapi.v1.assets.AssetsService/OptionsChain', - grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.OptionsChainRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.OptionsChainResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def Schedule(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/grpc.tradeapi.v1.assets.AssetsService/Schedule', - grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.ScheduleRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.ScheduleResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - 
wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def Clock(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/grpc.tradeapi.v1.assets.AssetsService/Clock', - grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.ClockRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_assets_dot_assets__service__pb2.ClockResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/auth/__init__.py b/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/auth/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/auth/auth_service_pb2.py b/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/auth/auth_service_pb2.py deleted file mode 100644 index f7f46cd3a7..0000000000 --- a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/auth/auth_service_pb2.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# NO CHECKED-IN PROTOBUF GENCODE -# source: grpc/tradeapi/v1/auth/auth_service.proto -# Protobuf Python Version: 6.31.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 6, - 31, - 1, - '', - 'grpc/tradeapi/v1/auth/auth_service.proto' -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n(grpc/tradeapi/v1/auth/auth_service.proto\x12\x15grpc.tradeapi.v1.auth\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x1d\n\x0b\x41uthRequest\x12\x0e\n\x06secret\x18\x01 \x01(\t\"\x1d\n\x0c\x41uthResponse\x12\r\n\x05token\x18\x01 \x01(\t\"$\n\x13TokenDetailsRequest\x12\r\n\x05token\x18\x01 \x01(\t\"\xda\x01\n\x14TokenDetailsResponse\x12.\n\ncreated_at\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\nexpires_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12;\n\x0emd_permissions\x18\x03 \x03(\x0b\x32#.grpc.tradeapi.v1.auth.MDPermission\x12\x13\n\x0b\x61\x63\x63ount_ids\x18\x04 \x03(\t\x12\x10\n\x08readonly\x18\x05 \x01(\x08\"\x8d\x03\n\x0cMDPermission\x12\x43\n\x0bquote_level\x18\x01 \x01(\x0e\x32..grpc.tradeapi.v1.auth.MDPermission.QuoteLevel\x12\x15\n\rdelay_minutes\x18\x02 \x01(\x05\x12\r\n\x03mic\x18\x03 \x01(\tH\x00\x12\x11\n\x07\x63ountry\x18\x04 \x01(\tH\x00\x12\x13\n\tcontinent\x18\x05 \x01(\tH\x00\x12\x13\n\tworldwide\x18\x06 
\x01(\x08H\x00\"\xc7\x01\n\nQuoteLevel\x12\x1b\n\x17QUOTE_LEVEL_UNSPECIFIED\x10\x00\x12\x1a\n\x16QUOTE_LEVEL_LAST_PRICE\x10\x01\x12\x1e\n\x1aQUOTE_LEVEL_BEST_BID_OFFER\x10\x02\x12\x1f\n\x1bQUOTE_LEVEL_DEPTH_OF_MARKET\x10\x03\x12\x1d\n\x19QUOTE_LEVEL_DEPTH_OF_BOOK\x10\x04\x12 \n\x1cQUOTE_LEVEL_ACCESS_FORBIDDEN\x10\x05\x42\x0b\n\tcondition\",\n\x1aSubscribeJwtRenewalRequest\x12\x0e\n\x06secret\x18\x01 \x01(\t\",\n\x1bSubscribeJwtRenewalResponse\x12\r\n\x05token\x18\x01 \x01(\t2\x82\x03\n\x0b\x41uthService\x12h\n\x04\x41uth\x12\".grpc.tradeapi.v1.auth.AuthRequest\x1a#.grpc.tradeapi.v1.auth.AuthResponse\"\x17\x82\xd3\xe4\x93\x02\x11\"\x0c/v1/sessions:\x01*\x12\x88\x01\n\x0cTokenDetails\x12*.grpc.tradeapi.v1.auth.TokenDetailsRequest\x1a+.grpc.tradeapi.v1.auth.TokenDetailsResponse\"\x1f\x82\xd3\xe4\x93\x02\x19\"\x14/v1/sessions/details:\x01*\x12~\n\x13SubscribeJwtRenewal\x12\x31.grpc.tradeapi.v1.auth.SubscribeJwtRenewalRequest\x1a\x32.grpc.tradeapi.v1.auth.SubscribeJwtRenewalResponse0\x01\x42\"P\x01Z\x1etrade_api/v1/auth/auth_serviceb\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'grpc.tradeapi.v1.auth.auth_service_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'P\001Z\036trade_api/v1/auth/auth_service' - _globals['_AUTHSERVICE'].methods_by_name['Auth']._loaded_options = None - _globals['_AUTHSERVICE'].methods_by_name['Auth']._serialized_options = b'\202\323\344\223\002\021\"\014/v1/sessions:\001*' - _globals['_AUTHSERVICE'].methods_by_name['TokenDetails']._loaded_options = None - _globals['_AUTHSERVICE'].methods_by_name['TokenDetails']._serialized_options = b'\202\323\344\223\002\031\"\024/v1/sessions/details:\001*' - _globals['_AUTHREQUEST']._serialized_start=130 - _globals['_AUTHREQUEST']._serialized_end=159 - _globals['_AUTHRESPONSE']._serialized_start=161 - 
_globals['_AUTHRESPONSE']._serialized_end=190 - _globals['_TOKENDETAILSREQUEST']._serialized_start=192 - _globals['_TOKENDETAILSREQUEST']._serialized_end=228 - _globals['_TOKENDETAILSRESPONSE']._serialized_start=231 - _globals['_TOKENDETAILSRESPONSE']._serialized_end=449 - _globals['_MDPERMISSION']._serialized_start=452 - _globals['_MDPERMISSION']._serialized_end=849 - _globals['_MDPERMISSION_QUOTELEVEL']._serialized_start=637 - _globals['_MDPERMISSION_QUOTELEVEL']._serialized_end=836 - _globals['_SUBSCRIBEJWTRENEWALREQUEST']._serialized_start=851 - _globals['_SUBSCRIBEJWTRENEWALREQUEST']._serialized_end=895 - _globals['_SUBSCRIBEJWTRENEWALRESPONSE']._serialized_start=897 - _globals['_SUBSCRIBEJWTRENEWALRESPONSE']._serialized_end=941 - _globals['_AUTHSERVICE']._serialized_start=944 - _globals['_AUTHSERVICE']._serialized_end=1330 -# @@protoc_insertion_point(module_scope) diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/auth/auth_service_pb2_grpc.py b/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/auth/auth_service_pb2_grpc.py deleted file mode 100644 index 37d046ef0d..0000000000 --- a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/auth/auth_service_pb2_grpc.py +++ /dev/null @@ -1,169 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
-"""Client and server classes corresponding to protobuf-defined services.""" -import grpc -import warnings - -from ziplime.data.data_sources.grpc.grpc_stubs.grpc.tradeapi.v1.auth import auth_service_pb2 as grpc_dot_tradeapi_dot_v1_dot_auth_dot_auth__service__pb2 - -GRPC_GENERATED_VERSION = '1.74.0' -GRPC_VERSION = grpc.__version__ -_version_not_supported = False - -try: - from grpc._utilities import first_version_is_lower - _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) -except ImportError: - _version_not_supported = True - -if _version_not_supported: - raise RuntimeError( - f'The grpc package installed is at version {GRPC_VERSION},' - + f' but the generated code in grpc/tradeapi/v1/auth/auth_service_pb2_grpc.py depends on' - + f' grpcio>={GRPC_GENERATED_VERSION}.' - + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' - + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' - ) - - -class AuthServiceStub(object): - def __init__(self, channel): - self.Auth = channel.unary_unary( - '/grpc.tradeapi.v1.auth.AuthService/Auth', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_auth_dot_auth__service__pb2.AuthRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_auth_dot_auth__service__pb2.AuthResponse.FromString, - _registered_method=True) - self.TokenDetails = channel.unary_unary( - '/grpc.tradeapi.v1.auth.AuthService/TokenDetails', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_auth_dot_auth__service__pb2.TokenDetailsRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_auth_dot_auth__service__pb2.TokenDetailsResponse.FromString, - _registered_method=True) - self.SubscribeJwtRenewal = channel.unary_stream( - '/grpc.tradeapi.v1.auth.AuthService/SubscribeJwtRenewal', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_auth_dot_auth__service__pb2.SubscribeJwtRenewalRequest.SerializeToString, - 
response_deserializer=grpc_dot_tradeapi_dot_v1_dot_auth_dot_auth__service__pb2.SubscribeJwtRenewalResponse.FromString, - _registered_method=True) - - -class AuthServiceServicer(object): - def Auth(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def TokenDetails(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def SubscribeJwtRenewal(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_AuthServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - 'Auth': grpc.unary_unary_rpc_method_handler( - servicer.Auth, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_auth_dot_auth__service__pb2.AuthRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_auth_dot_auth__service__pb2.AuthResponse.SerializeToString, - ), - 'TokenDetails': grpc.unary_unary_rpc_method_handler( - servicer.TokenDetails, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_auth_dot_auth__service__pb2.TokenDetailsRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_auth_dot_auth__service__pb2.TokenDetailsResponse.SerializeToString, - ), - 'SubscribeJwtRenewal': grpc.unary_stream_rpc_method_handler( - servicer.SubscribeJwtRenewal, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_auth_dot_auth__service__pb2.SubscribeJwtRenewalRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_auth_dot_auth__service__pb2.SubscribeJwtRenewalResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'grpc.tradeapi.v1.auth.AuthService', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - 
server.add_registered_method_handlers('grpc.tradeapi.v1.auth.AuthService', rpc_method_handlers) - - - # This class is part of an EXPERIMENTAL API. -class AuthService(object): - @staticmethod - def Auth(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/grpc.tradeapi.v1.auth.AuthService/Auth', - grpc_dot_tradeapi_dot_v1_dot_auth_dot_auth__service__pb2.AuthRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_auth_dot_auth__service__pb2.AuthResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def TokenDetails(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/grpc.tradeapi.v1.auth.AuthService/TokenDetails', - grpc_dot_tradeapi_dot_v1_dot_auth_dot_auth__service__pb2.TokenDetailsRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_auth_dot_auth__service__pb2.TokenDetailsResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def SubscribeJwtRenewal(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_stream( - request, - target, - '/grpc.tradeapi.v1.auth.AuthService/SubscribeJwtRenewal', - grpc_dot_tradeapi_dot_v1_dot_auth_dot_auth__service__pb2.SubscribeJwtRenewalRequest.SerializeToString, - 
grpc_dot_tradeapi_dot_v1_dot_auth_dot_auth__service__pb2.SubscribeJwtRenewalResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/marketdata/__init__.py b/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/marketdata/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/marketdata/marketdata_service_pb2.py b/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/marketdata/marketdata_service_pb2.py deleted file mode 100644 index 10d40674a3..0000000000 --- a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/marketdata/marketdata_service_pb2.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# NO CHECKED-IN PROTOBUF GENCODE -# source: grpc/tradeapi/v1/marketdata/marketdata_service.proto -# Protobuf Python Version: 6.31.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 6, - 31, - 1, - '', - 'grpc/tradeapi/v1/marketdata/marketdata_service.proto' -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.type import decimal_pb2 as google_dot_type_dot_decimal__pb2 -from google.type import interval_pb2 as google_dot_type_dot_interval__pb2 
-from ziplime.data.data_sources.grpc.grpc_stubs.grpc.tradeapi.v1 import side_pb2 as grpc_dot_tradeapi_dot_v1_dot_side__pb2 - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n4grpc/tradeapi/v1/marketdata/marketdata_service.proto\x12\x1bgrpc.tradeapi.v1.marketdata\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x19google/type/decimal.proto\x1a\x1agoogle/type/interval.proto\x1a\x1bgrpc/tradeapi/v1/side.proto\"\x81\x01\n\x0b\x42\x61rsRequest\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12\x39\n\ttimeframe\x18\x02 \x01(\x0e\x32&.grpc.tradeapi.v1.marketdata.TimeFrame\x12\'\n\x08interval\x18\x03 \x01(\x0b\x32\x15.google.type.Interval\"N\n\x0c\x42\x61rsResponse\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12.\n\x04\x62\x61rs\x18\x02 \x03(\x0b\x32 .grpc.tradeapi.v1.marketdata.Bar\"\x1e\n\x0cQuoteRequest\x12\x0e\n\x06symbol\x18\x01 \x01(\t\"R\n\rQuoteResponse\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12\x31\n\x05quote\x18\x02 \x01(\x0b\x32\".grpc.tradeapi.v1.marketdata.Quote\"\"\n\x10OrderBookRequest\x12\x0e\n\x06symbol\x18\x01 \x01(\t\"^\n\x11OrderBookResponse\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12\x39\n\torderbook\x18\x02 \x01(\x0b\x32&.grpc.tradeapi.v1.marketdata.OrderBook\"%\n\x13LatestTradesRequest\x12\x0e\n\x06symbol\x18\x01 \x01(\t\"Z\n\x14LatestTradesResponse\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12\x32\n\x06trades\x18\x02 \x03(\x0b\x32\".grpc.tradeapi.v1.marketdata.Trade\"(\n\x15SubscribeQuoteRequest\x12\x0f\n\x07symbols\x18\x01 \x03(\t\"\x84\x01\n\x16SubscribeQuoteResponse\x12\x31\n\x05quote\x18\x01 \x03(\x0b\x32\".grpc.tradeapi.v1.marketdata.Quote\x12\x37\n\x05\x65rror\x18\x02 \x01(\x0b\x32(.grpc.tradeapi.v1.marketdata.StreamError\"+\n\x19SubscribeOrderBookRequest\x12\x0e\n\x06symbol\x18\x01 \x01(\t\"^\n\x1aSubscribeOrderBookResponse\x12@\n\norder_book\x18\x01 \x03(\x0b\x32,.grpc.tradeapi.v1.marketdata.StreamOrderBook\"a\n\x14SubscribeBarsRequest\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12\x39\n\ttimeframe\x18\x02 
\x01(\x0e\x32&.grpc.tradeapi.v1.marketdata.TimeFrame\"W\n\x15SubscribeBarsResponse\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12.\n\x04\x62\x61rs\x18\x02 \x03(\x0b\x32 .grpc.tradeapi.v1.marketdata.Bar\"\xea\x01\n\x03\x42\x61r\x12-\n\ttimestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\"\n\x04open\x18\x02 \x01(\x0b\x32\x14.google.type.Decimal\x12\"\n\x04high\x18\x03 \x01(\x0b\x32\x14.google.type.Decimal\x12!\n\x03low\x18\x04 \x01(\x0b\x32\x14.google.type.Decimal\x12#\n\x05\x63lose\x18\x05 \x01(\x0b\x32\x14.google.type.Decimal\x12$\n\x06volume\x18\x06 \x01(\x0b\x32\x14.google.type.Decimal\"\xc8\x07\n\x05Quote\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12!\n\x03\x61sk\x18\x03 \x01(\x0b\x32\x14.google.type.Decimal\x12&\n\x08\x61sk_size\x18\x04 \x01(\x0b\x32\x14.google.type.Decimal\x12!\n\x03\x62id\x18\x05 \x01(\x0b\x32\x14.google.type.Decimal\x12&\n\x08\x62id_size\x18\x06 \x01(\x0b\x32\x14.google.type.Decimal\x12\"\n\x04last\x18\x07 \x01(\x0b\x32\x14.google.type.Decimal\x12\'\n\tlast_size\x18\x08 \x01(\x0b\x32\x14.google.type.Decimal\x12$\n\x06volume\x18\t \x01(\x0b\x32\x14.google.type.Decimal\x12&\n\x08turnover\x18\n \x01(\x0b\x32\x14.google.type.Decimal\x12\"\n\x04open\x18\x0b \x01(\x0b\x32\x14.google.type.Decimal\x12\"\n\x04high\x18\x0c \x01(\x0b\x32\x14.google.type.Decimal\x12!\n\x03low\x18\r \x01(\x0b\x32\x14.google.type.Decimal\x12#\n\x05\x63lose\x18\x0e \x01(\x0b\x32\x14.google.type.Decimal\x12$\n\x06\x63hange\x18\x0f \x01(\x0b\x32\x14.google.type.Decimal\x12;\n\x06option\x18\x32 \x01(\x0b\x32).grpc.tradeapi.v1.marketdata.Quote.OptionH\x00\x1a\xce\x02\n\x06Option\x12+\n\ropen_interest\x18\x01 \x01(\x0b\x32\x14.google.type.Decimal\x12\x30\n\x12implied_volatility\x18\x02 \x01(\x0b\x32\x14.google.type.Decimal\x12/\n\x11theoretical_price\x18\x03 \x01(\x0b\x32\x14.google.type.Decimal\x12#\n\x05\x64\x65lta\x18\x04 \x01(\x0b\x32\x14.google.type.Decimal\x12#\n\x05gamma\x18\x05 
\x01(\x0b\x32\x14.google.type.Decimal\x12#\n\x05theta\x18\x06 \x01(\x0b\x32\x14.google.type.Decimal\x12\"\n\x04vega\x18\x07 \x01(\x0b\x32\x14.google.type.Decimal\x12!\n\x03rho\x18\x08 \x01(\x0b\x32\x14.google.type.DecimalB\x0b\n\tadditions\"\xa7\x03\n\tOrderBook\x12\x38\n\x04rows\x18\x01 \x03(\x0b\x32*.grpc.tradeapi.v1.marketdata.OrderBook.Row\x1a\xdf\x02\n\x03Row\x12#\n\x05price\x18\x01 \x01(\x0b\x32\x14.google.type.Decimal\x12)\n\tsell_size\x18\x02 \x01(\x0b\x32\x14.google.type.DecimalH\x00\x12(\n\x08\x62uy_size\x18\x03 \x01(\x0b\x32\x14.google.type.DecimalH\x00\x12\x41\n\x06\x61\x63tion\x18\x04 \x01(\x0e\x32\x31.grpc.tradeapi.v1.marketdata.OrderBook.Row.Action\x12\x0c\n\x04mpid\x18\x05 \x01(\t\x12-\n\ttimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"V\n\x06\x41\x63tion\x12\x16\n\x12\x41\x43TION_UNSPECIFIED\x10\x00\x12\x11\n\rACTION_REMOVE\x10\x01\x12\x0e\n\nACTION_ADD\x10\x02\x12\x11\n\rACTION_UPDATE\x10\x03\x42\x06\n\x04side\"\xc5\x01\n\x05Trade\x12\x10\n\x08trade_id\x18\x01 \x01(\t\x12\x0c\n\x04mpid\x18\x02 \x01(\t\x12-\n\ttimestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12#\n\x05price\x18\x04 \x01(\x0b\x32\x14.google.type.Decimal\x12\"\n\x04size\x18\x05 \x01(\x0b\x32\x14.google.type.Decimal\x12$\n\x04side\x18\x06 \x01(\x0e\x32\x16.grpc.tradeapi.v1.Side\"0\n\x0bStreamError\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\"\xc9\x03\n\x0fStreamOrderBook\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12>\n\x04rows\x18\x02 \x03(\x0b\x32\x30.grpc.tradeapi.v1.marketdata.StreamOrderBook.Row\x1a\xe5\x02\n\x03Row\x12#\n\x05price\x18\x01 \x01(\x0b\x32\x14.google.type.Decimal\x12)\n\tsell_size\x18\x02 \x01(\x0b\x32\x14.google.type.DecimalH\x00\x12(\n\x08\x62uy_size\x18\x03 \x01(\x0b\x32\x14.google.type.DecimalH\x00\x12G\n\x06\x61\x63tion\x18\x04 \x01(\x0e\x32\x37.grpc.tradeapi.v1.marketdata.StreamOrderBook.Row.Action\x12\x0c\n\x04mpid\x18\x05 \x01(\t\x12-\n\ttimestamp\x18\x06 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\"V\n\x06\x41\x63tion\x12\x16\n\x12\x41\x43TION_UNSPECIFIED\x10\x00\x12\x11\n\rACTION_REMOVE\x10\x01\x12\x0e\n\nACTION_ADD\x10\x02\x12\x11\n\rACTION_UPDATE\x10\x03\x42\x06\n\x04side\".\n\x1cSubscribeLatestTradesRequest\x12\x0e\n\x06symbol\x18\x01 \x01(\t\"c\n\x1dSubscribeLatestTradesResponse\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12\x32\n\x06trades\x18\x02 \x03(\x0b\x32\".grpc.tradeapi.v1.marketdata.Trade*\x8b\x02\n\tTimeFrame\x12\x1a\n\x16TIME_FRAME_UNSPECIFIED\x10\x00\x12\x11\n\rTIME_FRAME_M1\x10\x01\x12\x11\n\rTIME_FRAME_M5\x10\x05\x12\x12\n\x0eTIME_FRAME_M15\x10\t\x12\x12\n\x0eTIME_FRAME_M30\x10\x0b\x12\x11\n\rTIME_FRAME_H1\x10\x0c\x12\x11\n\rTIME_FRAME_H2\x10\r\x12\x11\n\rTIME_FRAME_H4\x10\x0f\x12\x11\n\rTIME_FRAME_H8\x10\x11\x12\x10\n\x0cTIME_FRAME_D\x10\x13\x12\x10\n\x0cTIME_FRAME_W\x10\x14\x12\x11\n\rTIME_FRAME_MN\x10\x15\x12\x11\n\rTIME_FRAME_QR\x10\x16\x32\x80\t\n\x11MarketDataService\x12\x82\x01\n\x04\x42\x61rs\x12(.grpc.tradeapi.v1.marketdata.BarsRequest\x1a).grpc.tradeapi.v1.marketdata.BarsResponse\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/v1/instruments/{symbol}/bars\x12\x92\x01\n\tLastQuote\x12).grpc.tradeapi.v1.marketdata.QuoteRequest\x1a*.grpc.tradeapi.v1.marketdata.QuoteResponse\".\x82\xd3\xe4\x93\x02(\x12&/v1/instruments/{symbol}/quotes/latest\x12\x96\x01\n\tOrderBook\x12-.grpc.tradeapi.v1.marketdata.OrderBookRequest\x1a..grpc.tradeapi.v1.marketdata.OrderBookResponse\"*\x82\xd3\xe4\x93\x02$\x12\"/v1/instruments/{symbol}/orderbook\x12\xa3\x01\n\x0cLatestTrades\x12\x30.grpc.tradeapi.v1.marketdata.LatestTradesRequest\x1a\x31.grpc.tradeapi.v1.marketdata.LatestTradesResponse\".\x82\xd3\xe4\x93\x02(\x12&/v1/instruments/{symbol}/trades/latest\x12{\n\x0eSubscribeQuote\x12\x32.grpc.tradeapi.v1.marketdata.SubscribeQuoteRequest\x1a\x33.grpc.tradeapi.v1.marketdata.SubscribeQuoteResponse0\x01\x12\x87\x01\n\x12SubscribeOrderBook\x12\x36.grpc.tradeapi.v1.marketdata.SubscribeOrderBookRequest\x1a\x37.grpc.tradeapi.v1.marketdata.Su
bscribeOrderBookResponse0\x01\x12\x90\x01\n\x15SubscribeLatestTrades\x12\x39.grpc.tradeapi.v1.marketdata.SubscribeLatestTradesRequest\x1a:.grpc.tradeapi.v1.marketdata.SubscribeLatestTradesResponse0\x01\x12x\n\rSubscribeBars\x12\x31.grpc.tradeapi.v1.marketdata.SubscribeBarsRequest\x1a\x32.grpc.tradeapi.v1.marketdata.SubscribeBarsResponse0\x01\x42.P\x01Z*trade_api/v1/marketdata/marketdata_serviceb\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'grpc.tradeapi.v1.marketdata.marketdata_service_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'P\001Z*trade_api/v1/marketdata/marketdata_service' - _globals['_MARKETDATASERVICE'].methods_by_name['Bars']._loaded_options = None - _globals['_MARKETDATASERVICE'].methods_by_name['Bars']._serialized_options = b'\202\323\344\223\002\037\022\035/v1/instruments/{symbol}/bars' - _globals['_MARKETDATASERVICE'].methods_by_name['LastQuote']._loaded_options = None - _globals['_MARKETDATASERVICE'].methods_by_name['LastQuote']._serialized_options = b'\202\323\344\223\002(\022&/v1/instruments/{symbol}/quotes/latest' - _globals['_MARKETDATASERVICE'].methods_by_name['OrderBook']._loaded_options = None - _globals['_MARKETDATASERVICE'].methods_by_name['OrderBook']._serialized_options = b'\202\323\344\223\002$\022\"/v1/instruments/{symbol}/orderbook' - _globals['_MARKETDATASERVICE'].methods_by_name['LatestTrades']._loaded_options = None - _globals['_MARKETDATASERVICE'].methods_by_name['LatestTrades']._serialized_options = b'\202\323\344\223\002(\022&/v1/instruments/{symbol}/trades/latest' - _globals['_TIMEFRAME']._serialized_start=3823 - _globals['_TIMEFRAME']._serialized_end=4090 - _globals['_BARSREQUEST']._serialized_start=233 - _globals['_BARSREQUEST']._serialized_end=362 - _globals['_BARSRESPONSE']._serialized_start=364 - 
_globals['_BARSRESPONSE']._serialized_end=442 - _globals['_QUOTEREQUEST']._serialized_start=444 - _globals['_QUOTEREQUEST']._serialized_end=474 - _globals['_QUOTERESPONSE']._serialized_start=476 - _globals['_QUOTERESPONSE']._serialized_end=558 - _globals['_ORDERBOOKREQUEST']._serialized_start=560 - _globals['_ORDERBOOKREQUEST']._serialized_end=594 - _globals['_ORDERBOOKRESPONSE']._serialized_start=596 - _globals['_ORDERBOOKRESPONSE']._serialized_end=690 - _globals['_LATESTTRADESREQUEST']._serialized_start=692 - _globals['_LATESTTRADESREQUEST']._serialized_end=729 - _globals['_LATESTTRADESRESPONSE']._serialized_start=731 - _globals['_LATESTTRADESRESPONSE']._serialized_end=821 - _globals['_SUBSCRIBEQUOTEREQUEST']._serialized_start=823 - _globals['_SUBSCRIBEQUOTEREQUEST']._serialized_end=863 - _globals['_SUBSCRIBEQUOTERESPONSE']._serialized_start=866 - _globals['_SUBSCRIBEQUOTERESPONSE']._serialized_end=998 - _globals['_SUBSCRIBEORDERBOOKREQUEST']._serialized_start=1000 - _globals['_SUBSCRIBEORDERBOOKREQUEST']._serialized_end=1043 - _globals['_SUBSCRIBEORDERBOOKRESPONSE']._serialized_start=1045 - _globals['_SUBSCRIBEORDERBOOKRESPONSE']._serialized_end=1139 - _globals['_SUBSCRIBEBARSREQUEST']._serialized_start=1141 - _globals['_SUBSCRIBEBARSREQUEST']._serialized_end=1238 - _globals['_SUBSCRIBEBARSRESPONSE']._serialized_start=1240 - _globals['_SUBSCRIBEBARSRESPONSE']._serialized_end=1327 - _globals['_BAR']._serialized_start=1330 - _globals['_BAR']._serialized_end=1564 - _globals['_QUOTE']._serialized_start=1567 - _globals['_QUOTE']._serialized_end=2535 - _globals['_QUOTE_OPTION']._serialized_start=2188 - _globals['_QUOTE_OPTION']._serialized_end=2522 - _globals['_ORDERBOOK']._serialized_start=2538 - _globals['_ORDERBOOK']._serialized_end=2961 - _globals['_ORDERBOOK_ROW']._serialized_start=2610 - _globals['_ORDERBOOK_ROW']._serialized_end=2961 - _globals['_ORDERBOOK_ROW_ACTION']._serialized_start=2867 - _globals['_ORDERBOOK_ROW_ACTION']._serialized_end=2953 - 
_globals['_TRADE']._serialized_start=2964 - _globals['_TRADE']._serialized_end=3161 - _globals['_STREAMERROR']._serialized_start=3163 - _globals['_STREAMERROR']._serialized_end=3211 - _globals['_STREAMORDERBOOK']._serialized_start=3214 - _globals['_STREAMORDERBOOK']._serialized_end=3671 - _globals['_STREAMORDERBOOK_ROW']._serialized_start=3314 - _globals['_STREAMORDERBOOK_ROW']._serialized_end=3671 - _globals['_STREAMORDERBOOK_ROW_ACTION']._serialized_start=2867 - _globals['_STREAMORDERBOOK_ROW_ACTION']._serialized_end=2953 - _globals['_SUBSCRIBELATESTTRADESREQUEST']._serialized_start=3673 - _globals['_SUBSCRIBELATESTTRADESREQUEST']._serialized_end=3719 - _globals['_SUBSCRIBELATESTTRADESRESPONSE']._serialized_start=3721 - _globals['_SUBSCRIBELATESTTRADESRESPONSE']._serialized_end=3820 - _globals['_MARKETDATASERVICE']._serialized_start=4093 - _globals['_MARKETDATASERVICE']._serialized_end=5245 -# @@protoc_insertion_point(module_scope) diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/marketdata/marketdata_service_pb2_grpc.py b/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/marketdata/marketdata_service_pb2_grpc.py deleted file mode 100644 index 9960f5172f..0000000000 --- a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/marketdata/marketdata_service_pb2_grpc.py +++ /dev/null @@ -1,378 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
-"""Client and server classes corresponding to protobuf-defined services.""" -import grpc -import warnings - -from ziplime.data.data_sources.grpc.grpc_stubs.grpc.tradeapi.v1.marketdata import marketdata_service_pb2 as grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2 - -GRPC_GENERATED_VERSION = '1.74.0' -GRPC_VERSION = grpc.__version__ -_version_not_supported = False - -try: - from grpc._utilities import first_version_is_lower - _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) -except ImportError: - _version_not_supported = True - -if _version_not_supported: - raise RuntimeError( - f'The grpc package installed is at version {GRPC_VERSION},' - + f' but the generated code in grpc/tradeapi/v1/marketdata/marketdata_service_pb2_grpc.py depends on' - + f' grpcio>={GRPC_GENERATED_VERSION}.' - + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' - + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' 
- ) - - -class MarketDataServiceStub(object): - def __init__(self, channel): - self.Bars = channel.unary_unary( - '/grpc.tradeapi.v1.marketdata.MarketDataService/Bars', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.BarsRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.BarsResponse.FromString, - _registered_method=True) - self.LastQuote = channel.unary_unary( - '/grpc.tradeapi.v1.marketdata.MarketDataService/LastQuote', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.QuoteRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.QuoteResponse.FromString, - _registered_method=True) - self.OrderBook = channel.unary_unary( - '/grpc.tradeapi.v1.marketdata.MarketDataService/OrderBook', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.OrderBookRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.OrderBookResponse.FromString, - _registered_method=True) - self.LatestTrades = channel.unary_unary( - '/grpc.tradeapi.v1.marketdata.MarketDataService/LatestTrades', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.LatestTradesRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.LatestTradesResponse.FromString, - _registered_method=True) - self.SubscribeQuote = channel.unary_stream( - '/grpc.tradeapi.v1.marketdata.MarketDataService/SubscribeQuote', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeQuoteRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeQuoteResponse.FromString, - _registered_method=True) - self.SubscribeOrderBook = 
channel.unary_stream( - '/grpc.tradeapi.v1.marketdata.MarketDataService/SubscribeOrderBook', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeOrderBookRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeOrderBookResponse.FromString, - _registered_method=True) - self.SubscribeLatestTrades = channel.unary_stream( - '/grpc.tradeapi.v1.marketdata.MarketDataService/SubscribeLatestTrades', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeLatestTradesRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeLatestTradesResponse.FromString, - _registered_method=True) - self.SubscribeBars = channel.unary_stream( - '/grpc.tradeapi.v1.marketdata.MarketDataService/SubscribeBars', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeBarsRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeBarsResponse.FromString, - _registered_method=True) - - -class MarketDataServiceServicer(object): - def Bars(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def LastQuote(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def OrderBook(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def LatestTrades(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise 
NotImplementedError('Method not implemented!') - - def SubscribeQuote(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def SubscribeOrderBook(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def SubscribeLatestTrades(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def SubscribeBars(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_MarketDataServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - 'Bars': grpc.unary_unary_rpc_method_handler( - servicer.Bars, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.BarsRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.BarsResponse.SerializeToString, - ), - 'LastQuote': grpc.unary_unary_rpc_method_handler( - servicer.LastQuote, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.QuoteRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.QuoteResponse.SerializeToString, - ), - 'OrderBook': grpc.unary_unary_rpc_method_handler( - servicer.OrderBook, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.OrderBookRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.OrderBookResponse.SerializeToString, - ), - 'LatestTrades': grpc.unary_unary_rpc_method_handler( - servicer.LatestTrades, - 
request_deserializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.LatestTradesRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.LatestTradesResponse.SerializeToString, - ), - 'SubscribeQuote': grpc.unary_stream_rpc_method_handler( - servicer.SubscribeQuote, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeQuoteRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeQuoteResponse.SerializeToString, - ), - 'SubscribeOrderBook': grpc.unary_stream_rpc_method_handler( - servicer.SubscribeOrderBook, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeOrderBookRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeOrderBookResponse.SerializeToString, - ), - 'SubscribeLatestTrades': grpc.unary_stream_rpc_method_handler( - servicer.SubscribeLatestTrades, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeLatestTradesRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeLatestTradesResponse.SerializeToString, - ), - 'SubscribeBars': grpc.unary_stream_rpc_method_handler( - servicer.SubscribeBars, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeBarsRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeBarsResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'grpc.tradeapi.v1.marketdata.MarketDataService', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - server.add_registered_method_handlers('grpc.tradeapi.v1.marketdata.MarketDataService', rpc_method_handlers) - - 
-class MarketDataService(object): - @staticmethod - def Bars(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/grpc.tradeapi.v1.marketdata.MarketDataService/Bars', - grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.BarsRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.BarsResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def LastQuote(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/grpc.tradeapi.v1.marketdata.MarketDataService/LastQuote', - grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.QuoteRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.QuoteResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def OrderBook(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/grpc.tradeapi.v1.marketdata.MarketDataService/OrderBook', - grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.OrderBookRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.OrderBookResponse.FromString, - options, - channel_credentials, - insecure, - 
call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def LatestTrades(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/grpc.tradeapi.v1.marketdata.MarketDataService/LatestTrades', - grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.LatestTradesRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.LatestTradesResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def SubscribeQuote(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_stream( - request, - target, - '/grpc.tradeapi.v1.marketdata.MarketDataService/SubscribeQuote', - grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeQuoteRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeQuoteResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def SubscribeOrderBook(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_stream( - request, - target, - '/grpc.tradeapi.v1.marketdata.MarketDataService/SubscribeOrderBook', - grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeOrderBookRequest.SerializeToString, - 
grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeOrderBookResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def SubscribeLatestTrades(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_stream( - request, - target, - '/grpc.tradeapi.v1.marketdata.MarketDataService/SubscribeLatestTrades', - grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeLatestTradesRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeLatestTradesResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def SubscribeBars(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_stream( - request, - target, - '/grpc.tradeapi.v1.marketdata.MarketDataService/SubscribeBars', - grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeBarsRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_marketdata_dot_marketdata__service__pb2.SubscribeBarsResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/orders/__init__.py b/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/orders/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git 
a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/orders/orders_service_pb2.py b/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/orders/orders_service_pb2.py deleted file mode 100644 index 23781f1094..0000000000 --- a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/orders/orders_service_pb2.py +++ /dev/null @@ -1,82 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# NO CHECKED-IN PROTOBUF GENCODE -# source: grpc/tradeapi/v1/orders/orders_service.proto -# Protobuf Python Version: 6.31.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 6, - 31, - 1, - '', - 'grpc/tradeapi/v1/orders/orders_service.proto' -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.type import decimal_pb2 as google_dot_type_dot_decimal__pb2 -from ziplime.data.data_sources.grpc.grpc_stubs.grpc.tradeapi.v1 import side_pb2 as grpc_dot_tradeapi_dot_v1_dot_side__pb2 -from ziplime.data.data_sources.grpc.grpc_stubs.grpc.tradeapi.v1 import trade_pb2 as grpc_dot_tradeapi_dot_v1_dot_trade__pb2 - - -DESCRIPTOR = 
_descriptor_pool.Default().AddSerializedFile(b'\n,grpc/tradeapi/v1/orders/orders_service.proto\x12\x17grpc.tradeapi.v1.orders\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x19google/type/decimal.proto\x1a\x1bgrpc/tradeapi/v1/side.proto\x1a\x1cgrpc/tradeapi/v1/trade.proto\"\xb5\x02\n\x11OrderTradeRequest\x12\x41\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32\x31.grpc.tradeapi.v1.orders.OrderTradeRequest.Action\x12\x46\n\tdata_type\x18\x02 \x01(\x0e\x32\x33.grpc.tradeapi.v1.orders.OrderTradeRequest.DataType\x12\x12\n\naccount_id\x18\x03 \x01(\t\"6\n\x06\x41\x63tion\x12\x14\n\x10\x41\x43TION_SUBSCRIBE\x10\x00\x12\x16\n\x12\x41\x43TION_UNSUBSCRIBE\x10\x01\"I\n\x08\x44\x61taType\x12\x11\n\rDATA_TYPE_ALL\x10\x00\x12\x14\n\x10\x44\x41TA_TYPE_ORDERS\x10\x01\x12\x14\n\x10\x44\x41TA_TYPE_TRADES\x10\x02\"y\n\x12OrderTradeResponse\x12\x33\n\x06orders\x18\x01 \x03(\x0b\x32#.grpc.tradeapi.v1.orders.OrderState\x12.\n\x06trades\x18\x02 \x03(\x0b\x32\x1e.grpc.tradeapi.v1.AccountTrade\"7\n\x0fGetOrderRequest\x12\x12\n\naccount_id\x18\x01 \x01(\t\x12\x10\n\x08order_id\x18\x02 \x01(\t\"\xfe\x03\n\x05Order\x12\x12\n\naccount_id\x18\x01 \x01(\t\x12\x0e\n\x06symbol\x18\x02 \x01(\t\x12&\n\x08quantity\x18\x03 \x01(\x0b\x32\x14.google.type.Decimal\x12$\n\x04side\x18\x04 \x01(\x0e\x32\x16.grpc.tradeapi.v1.Side\x12\x30\n\x04type\x18\x05 \x01(\x0e\x32\".grpc.tradeapi.v1.orders.OrderType\x12;\n\rtime_in_force\x18\x06 \x01(\x0e\x32$.grpc.tradeapi.v1.orders.TimeInForce\x12)\n\x0blimit_price\x18\x07 \x01(\x0b\x32\x14.google.type.Decimal\x12(\n\nstop_price\x18\x08 \x01(\x0b\x32\x14.google.type.Decimal\x12>\n\x0estop_condition\x18\t \x01(\x0e\x32&.grpc.tradeapi.v1.orders.StopCondition\x12*\n\x04legs\x18\n \x03(\x0b\x32\x1c.grpc.tradeapi.v1.orders.Leg\x12\x17\n\x0f\x63lient_order_id\x18\x0b \x01(\t\x12:\n\x0cvalid_before\x18\x0c \x01(\x0e\x32$.grpc.tradeapi.v1.orders.ValidBefore\"c\n\x03Leg\x12\x0e\n\x06symbol\x18\x01 \x01(\t\x12&\n\x08quantity\x18\x02 
\x01(\x0b\x32\x14.google.type.Decimal\x12$\n\x04side\x18\x03 \x01(\x0e\x32\x16.grpc.tradeapi.v1.Side\"\xa5\x02\n\nOrderState\x12\x10\n\x08order_id\x18\x01 \x01(\t\x12\x0f\n\x07\x65xec_id\x18\x02 \x01(\t\x12\x34\n\x06status\x18\x03 \x01(\x0e\x32$.grpc.tradeapi.v1.orders.OrderStatus\x12-\n\x05order\x18\x04 \x01(\x0b\x32\x1e.grpc.tradeapi.v1.orders.Order\x12/\n\x0btransact_at\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12-\n\taccept_at\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bwithdraw_at\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"#\n\rOrdersRequest\x12\x12\n\naccount_id\x18\x01 \x01(\t\"E\n\x0eOrdersResponse\x12\x33\n\x06orders\x18\x01 \x03(\x0b\x32#.grpc.tradeapi.v1.orders.OrderState\":\n\x12\x43\x61ncelOrderRequest\x12\x12\n\naccount_id\x18\x01 \x01(\t\x12\x10\n\x08order_id\x18\x02 \x01(\t*\x9e\x01\n\tOrderType\x12\x1a\n\x16ORDER_TYPE_UNSPECIFIED\x10\x00\x12\x15\n\x11ORDER_TYPE_MARKET\x10\x01\x12\x14\n\x10ORDER_TYPE_LIMIT\x10\x02\x12\x13\n\x0fORDER_TYPE_STOP\x10\x03\x12\x19\n\x15ORDER_TYPE_STOP_LIMIT\x10\x04\x12\x18\n\x14ORDER_TYPE_MULTI_LEG\x10\x05*\x89\x02\n\x0bTimeInForce\x12\x1d\n\x19TIME_IN_FORCE_UNSPECIFIED\x10\x00\x12\x15\n\x11TIME_IN_FORCE_DAY\x10\x01\x12\"\n\x1eTIME_IN_FORCE_GOOD_TILL_CANCEL\x10\x02\x12$\n 
TIME_IN_FORCE_GOOD_TILL_CROSSING\x10\x03\x12\x15\n\x11TIME_IN_FORCE_EXT\x10\x04\x12\x19\n\x15TIME_IN_FORCE_ON_OPEN\x10\x05\x12\x1a\n\x16TIME_IN_FORCE_ON_CLOSE\x10\x06\x12\x15\n\x11TIME_IN_FORCE_IOC\x10\x07\x12\x15\n\x11TIME_IN_FORCE_FOK\x10\x08*i\n\rStopCondition\x12\x1e\n\x1aSTOP_CONDITION_UNSPECIFIED\x10\x00\x12\x1a\n\x16STOP_CONDITION_LAST_UP\x10\x01\x12\x1c\n\x18STOP_CONDITION_LAST_DOWN\x10\x02*\xe7\x06\n\x0bOrderStatus\x12\x1c\n\x18ORDER_STATUS_UNSPECIFIED\x10\x00\x12\x14\n\x10ORDER_STATUS_NEW\x10\x01\x12!\n\x1dORDER_STATUS_PARTIALLY_FILLED\x10\x02\x12\x17\n\x13ORDER_STATUS_FILLED\x10\x03\x12\x1d\n\x19ORDER_STATUS_DONE_FOR_DAY\x10\x04\x12\x19\n\x15ORDER_STATUS_CANCELED\x10\x05\x12\x19\n\x15ORDER_STATUS_REPLACED\x10\x06\x12\x1f\n\x1bORDER_STATUS_PENDING_CANCEL\x10\x07\x12\x19\n\x15ORDER_STATUS_REJECTED\x10\t\x12\x1a\n\x16ORDER_STATUS_SUSPENDED\x10\n\x12\x1c\n\x18ORDER_STATUS_PENDING_NEW\x10\x0b\x12\x18\n\x14ORDER_STATUS_EXPIRED\x10\r\x12\x17\n\x13ORDER_STATUS_FAILED\x10\x10\x12\x1b\n\x17ORDER_STATUS_FORWARDING\x10\x11\x12\x15\n\x11ORDER_STATUS_WAIT\x10\x12\x12!\n\x1dORDER_STATUS_DENIED_BY_BROKER\x10\x13\x12%\n!ORDER_STATUS_REJECTED_BY_EXCHANGE\x10\x14\x12\x19\n\x15ORDER_STATUS_WATCHING\x10\x15\x12\x19\n\x15ORDER_STATUS_EXECUTED\x10\x16\x12\x19\n\x15ORDER_STATUS_DISABLED\x10\x17\x12\x1a\n\x16ORDER_STATUS_LINK_WAIT\x10\x18\x12\x1e\n\x1aORDER_STATUS_SL_GUARD_TIME\x10\x1b\x12\x1c\n\x18ORDER_STATUS_SL_EXECUTED\x10\x1c\x12\x1e\n\x1aORDER_STATUS_SL_FORWARDING\x10\x1d\x12\x1e\n\x1aORDER_STATUS_TP_GUARD_TIME\x10\x1e\x12\x1c\n\x18ORDER_STATUS_TP_EXECUTED\x10\x1f\x12\x1e\n\x1aORDER_STATUS_TP_CORRECTION\x10 
\x12\x1e\n\x1aORDER_STATUS_TP_FORWARDING\x10!\x12#\n\x1fORDER_STATUS_TP_CORR_GUARD_TIME\x10\"*\x8c\x01\n\x0bValidBefore\x12\x1c\n\x18VALID_BEFORE_UNSPECIFIED\x10\x00\x12\x1b\n\x17VALID_BEFORE_END_OF_DAY\x10\x01\x12!\n\x1dVALID_BEFORE_GOOD_TILL_CANCEL\x10\x02\x12\x1f\n\x1bVALID_BEFORE_GOOD_TILL_DATE\x10\x03\x32\xb4\x05\n\rOrdersService\x12~\n\nPlaceOrder\x12\x1e.grpc.tradeapi.v1.orders.Order\x1a#.grpc.tradeapi.v1.orders.OrderState\"+\x82\xd3\xe4\x93\x02%\" /v1/accounts/{account_id}/orders:\x01*\x12\x94\x01\n\x0b\x43\x61ncelOrder\x12+.grpc.tradeapi.v1.orders.CancelOrderRequest\x1a#.grpc.tradeapi.v1.orders.OrderState\"3\x82\xd3\xe4\x93\x02-*+/v1/accounts/{account_id}/orders/{order_id}\x12\x86\x01\n\tGetOrders\x12&.grpc.tradeapi.v1.orders.OrdersRequest\x1a\'.grpc.tradeapi.v1.orders.OrdersResponse\"(\x82\xd3\xe4\x93\x02\"\x12 /v1/accounts/{account_id}/orders\x12\x8e\x01\n\x08GetOrder\x12(.grpc.tradeapi.v1.orders.GetOrderRequest\x1a#.grpc.tradeapi.v1.orders.OrderState\"3\x82\xd3\xe4\x93\x02-\x12+/v1/accounts/{account_id}/orders/{order_id}\x12r\n\x13SubscribeOrderTrade\x12*.grpc.tradeapi.v1.orders.OrderTradeRequest\x1a+.grpc.tradeapi.v1.orders.OrderTradeResponse(\x01\x30\x01\x42&P\x01Z\"trade_api/v1/orders/orders_serviceb\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'grpc.tradeapi.v1.orders.orders_service_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'P\001Z\"trade_api/v1/orders/orders_service' - _globals['_ORDERSSERVICE'].methods_by_name['PlaceOrder']._loaded_options = None - _globals['_ORDERSSERVICE'].methods_by_name['PlaceOrder']._serialized_options = b'\202\323\344\223\002%\" /v1/accounts/{account_id}/orders:\001*' - _globals['_ORDERSSERVICE'].methods_by_name['CancelOrder']._loaded_options = None - 
_globals['_ORDERSSERVICE'].methods_by_name['CancelOrder']._serialized_options = b'\202\323\344\223\002-*+/v1/accounts/{account_id}/orders/{order_id}' - _globals['_ORDERSSERVICE'].methods_by_name['GetOrders']._loaded_options = None - _globals['_ORDERSSERVICE'].methods_by_name['GetOrders']._serialized_options = b'\202\323\344\223\002\"\022 /v1/accounts/{account_id}/orders' - _globals['_ORDERSSERVICE'].methods_by_name['GetOrder']._loaded_options = None - _globals['_ORDERSSERVICE'].methods_by_name['GetOrder']._serialized_options = b'\202\323\344\223\002-\022+/v1/accounts/{account_id}/orders/{order_id}' - _globals['_ORDERTYPE']._serialized_start=1793 - _globals['_ORDERTYPE']._serialized_end=1951 - _globals['_TIMEINFORCE']._serialized_start=1954 - _globals['_TIMEINFORCE']._serialized_end=2219 - _globals['_STOPCONDITION']._serialized_start=2221 - _globals['_STOPCONDITION']._serialized_end=2326 - _globals['_ORDERSTATUS']._serialized_start=2329 - _globals['_ORDERSTATUS']._serialized_end=3200 - _globals['_VALIDBEFORE']._serialized_start=3203 - _globals['_VALIDBEFORE']._serialized_end=3343 - _globals['_ORDERTRADEREQUEST']._serialized_start=223 - _globals['_ORDERTRADEREQUEST']._serialized_end=532 - _globals['_ORDERTRADEREQUEST_ACTION']._serialized_start=403 - _globals['_ORDERTRADEREQUEST_ACTION']._serialized_end=457 - _globals['_ORDERTRADEREQUEST_DATATYPE']._serialized_start=459 - _globals['_ORDERTRADEREQUEST_DATATYPE']._serialized_end=532 - _globals['_ORDERTRADERESPONSE']._serialized_start=534 - _globals['_ORDERTRADERESPONSE']._serialized_end=655 - _globals['_GETORDERREQUEST']._serialized_start=657 - _globals['_GETORDERREQUEST']._serialized_end=712 - _globals['_ORDER']._serialized_start=715 - _globals['_ORDER']._serialized_end=1225 - _globals['_LEG']._serialized_start=1227 - _globals['_LEG']._serialized_end=1326 - _globals['_ORDERSTATE']._serialized_start=1329 - _globals['_ORDERSTATE']._serialized_end=1622 - _globals['_ORDERSREQUEST']._serialized_start=1624 - 
_globals['_ORDERSREQUEST']._serialized_end=1659 - _globals['_ORDERSRESPONSE']._serialized_start=1661 - _globals['_ORDERSRESPONSE']._serialized_end=1730 - _globals['_CANCELORDERREQUEST']._serialized_start=1732 - _globals['_CANCELORDERREQUEST']._serialized_end=1790 - _globals['_ORDERSSERVICE']._serialized_start=3346 - _globals['_ORDERSSERVICE']._serialized_end=4038 -# @@protoc_insertion_point(module_scope) diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/orders/orders_service_pb2_grpc.py b/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/orders/orders_service_pb2_grpc.py deleted file mode 100644 index 27f555cf69..0000000000 --- a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/orders/orders_service_pb2_grpc.py +++ /dev/null @@ -1,254 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc -import warnings - -from ziplime.data.data_sources.grpc.grpc_stubs.grpc.tradeapi.v1.orders import orders_service_pb2 as grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2 - -GRPC_GENERATED_VERSION = '1.74.0' -GRPC_VERSION = grpc.__version__ -_version_not_supported = False - -try: - from grpc._utilities import first_version_is_lower - _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) -except ImportError: - _version_not_supported = True - -if _version_not_supported: - raise RuntimeError( - f'The grpc package installed is at version {GRPC_VERSION},' - + f' but the generated code in grpc/tradeapi/v1/orders/orders_service_pb2_grpc.py depends on' - + f' grpcio>={GRPC_GENERATED_VERSION}.' - + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' - + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' 
- ) - - -class OrdersServiceStub(object): - def __init__(self, channel): - self.PlaceOrder = channel.unary_unary( - '/grpc.tradeapi.v1.orders.OrdersService/PlaceOrder', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.Order.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrderState.FromString, - _registered_method=True) - self.CancelOrder = channel.unary_unary( - '/grpc.tradeapi.v1.orders.OrdersService/CancelOrder', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.CancelOrderRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrderState.FromString, - _registered_method=True) - self.GetOrders = channel.unary_unary( - '/grpc.tradeapi.v1.orders.OrdersService/GetOrders', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrdersRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrdersResponse.FromString, - _registered_method=True) - self.GetOrder = channel.unary_unary( - '/grpc.tradeapi.v1.orders.OrdersService/GetOrder', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.GetOrderRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrderState.FromString, - _registered_method=True) - self.SubscribeOrderTrade = channel.stream_stream( - '/grpc.tradeapi.v1.orders.OrdersService/SubscribeOrderTrade', - request_serializer=grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrderTradeRequest.SerializeToString, - response_deserializer=grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrderTradeResponse.FromString, - _registered_method=True) - - -class OrdersServiceServicer(object): - - def PlaceOrder(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method 
not implemented!') - raise NotImplementedError('Method not implemented!') - - def CancelOrder(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def GetOrders(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def GetOrder(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def SubscribeOrderTrade(self, request_iterator, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_OrdersServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - 'PlaceOrder': grpc.unary_unary_rpc_method_handler( - servicer.PlaceOrder, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.Order.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrderState.SerializeToString, - ), - 'CancelOrder': grpc.unary_unary_rpc_method_handler( - servicer.CancelOrder, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.CancelOrderRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrderState.SerializeToString, - ), - 'GetOrders': grpc.unary_unary_rpc_method_handler( - servicer.GetOrders, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrdersRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrdersResponse.SerializeToString, - ), - 'GetOrder': grpc.unary_unary_rpc_method_handler( - servicer.GetOrder, - 
request_deserializer=grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.GetOrderRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrderState.SerializeToString, - ), - 'SubscribeOrderTrade': grpc.stream_stream_rpc_method_handler( - servicer.SubscribeOrderTrade, - request_deserializer=grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrderTradeRequest.FromString, - response_serializer=grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrderTradeResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'grpc.tradeapi.v1.orders.OrdersService', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - server.add_registered_method_handlers('grpc.tradeapi.v1.orders.OrdersService', rpc_method_handlers) - - - # This class is part of an EXPERIMENTAL API. -class OrdersService(object): - @staticmethod - def PlaceOrder(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/grpc.tradeapi.v1.orders.OrdersService/PlaceOrder', - grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.Order.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrderState.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def CancelOrder(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/grpc.tradeapi.v1.orders.OrdersService/CancelOrder', - 
grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.CancelOrderRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrderState.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def GetOrders(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/grpc.tradeapi.v1.orders.OrdersService/GetOrders', - grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrdersRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrdersResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def GetOrder(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/grpc.tradeapi.v1.orders.OrdersService/GetOrder', - grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.GetOrderRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrderState.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def SubscribeOrderTrade(request_iterator, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.stream_stream( - request_iterator, - target, - 
'/grpc.tradeapi.v1.orders.OrdersService/SubscribeOrderTrade', - grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrderTradeRequest.SerializeToString, - grpc_dot_tradeapi_dot_v1_dot_orders_dot_orders__service__pb2.OrderTradeResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/side_pb2.py b/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/side_pb2.py deleted file mode 100644 index bba498c485..0000000000 --- a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/side_pb2.py +++ /dev/null @@ -1,37 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# NO CHECKED-IN PROTOBUF GENCODE -# source: grpc/tradeapi/v1/side.proto -# Protobuf Python Version: 6.31.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 6, - 31, - 1, - '', - 'grpc/tradeapi/v1/side.proto' -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1bgrpc/tradeapi/v1/side.proto\x12\x10grpc.tradeapi.v1*9\n\x04Side\x12\x14\n\x10SIDE_UNSPECIFIED\x10\x00\x12\x0c\n\x08SIDE_BUY\x10\x01\x12\r\n\tSIDE_SELL\x10\x02\x42\x15P\x01Z\x11trade_api/v1/sideb\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'grpc.tradeapi.v1.side_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - 
_globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'P\001Z\021trade_api/v1/side' - _globals['_SIDE']._serialized_start=49 - _globals['_SIDE']._serialized_end=106 -# @@protoc_insertion_point(module_scope) diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/side_pb2_grpc.py b/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/side_pb2_grpc.py deleted file mode 100644 index 3883ebb31b..0000000000 --- a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/side_pb2_grpc.py +++ /dev/null @@ -1,24 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc -import warnings - - -GRPC_GENERATED_VERSION = '1.74.0' -GRPC_VERSION = grpc.__version__ -_version_not_supported = False - -try: - from grpc._utilities import first_version_is_lower - _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) -except ImportError: - _version_not_supported = True - -if _version_not_supported: - raise RuntimeError( - f'The grpc package installed is at version {GRPC_VERSION},' - + f' but the generated code in grpc/tradeapi/v1/side_pb2_grpc.py depends on' - + f' grpcio>={GRPC_GENERATED_VERSION}.' - + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' - + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' - ) diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/trade_pb2.py b/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/trade_pb2.py deleted file mode 100644 index 6782a62009..0000000000 --- a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/trade_pb2.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# NO CHECKED-IN PROTOBUF GENCODE -# source: grpc/tradeapi/v1/trade.proto -# Protobuf Python Version: 6.31.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 6, - 31, - 1, - '', - 'grpc/tradeapi/v1/trade.proto' -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.type import decimal_pb2 as google_dot_type_dot_decimal__pb2 -from ziplime.data.data_sources.grpc.grpc_stubs.grpc.tradeapi.v1 import side_pb2 as grpc_dot_tradeapi_dot_v1_dot_side__pb2 - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1cgrpc/tradeapi/v1/trade.proto\x12\x10grpc.tradeapi.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x19google/type/decimal.proto\x1a\x1bgrpc/tradeapi/v1/side.proto\"\xf4\x01\n\x0c\x41\x63\x63ountTrade\x12\x10\n\x08trade_id\x18\x01 \x01(\t\x12\x0e\n\x06symbol\x18\x02 \x01(\t\x12#\n\x05price\x18\x03 \x01(\x0b\x32\x14.google.type.Decimal\x12\"\n\x04size\x18\x04 \x01(\x0b\x32\x14.google.type.Decimal\x12$\n\x04side\x18\x05 \x01(\x0e\x32\x16.grpc.tradeapi.v1.Side\x12-\n\ttimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x10\n\x08order_id\x18\x07 \x01(\t\x12\x12\n\naccount_id\x18\x08 \x01(\tB\x16P\x01Z\x12trade_api/v1/tradeb\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'grpc.tradeapi.v1.trade_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = 
b'P\001Z\022trade_api/v1/trade' - _globals['_ACCOUNTTRADE']._serialized_start=140 - _globals['_ACCOUNTTRADE']._serialized_end=384 -# @@protoc_insertion_point(module_scope) diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/trade_pb2_grpc.py b/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/trade_pb2_grpc.py deleted file mode 100644 index 3aac6dd149..0000000000 --- a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/trade_pb2_grpc.py +++ /dev/null @@ -1,24 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc -import warnings - - -GRPC_GENERATED_VERSION = '1.74.0' -GRPC_VERSION = grpc.__version__ -_version_not_supported = False - -try: - from grpc._utilities import first_version_is_lower - _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) -except ImportError: - _version_not_supported = True - -if _version_not_supported: - raise RuntimeError( - f'The grpc package installed is at version {GRPC_VERSION},' - + f' but the generated code in grpc/tradeapi/v1/trade_pb2_grpc.py depends on' - + f' grpcio>={GRPC_GENERATED_VERSION}.' - + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' - + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' 
- ) diff --git a/ziplime/data/domain/data_bundle.py b/ziplime/data/domain/data_bundle.py index 0d1f5a50db..023c65f6a7 100644 --- a/ziplime/data/domain/data_bundle.py +++ b/ziplime/data/domain/data_bundle.py @@ -56,7 +56,62 @@ def get_dataframe_with_columns(self, columns: frozenset[str]) -> pl.DataFrame: @lru_cache def get_dataframe_by_sid_and_columns(self, sid: str, columns: frozenset[str]) -> pl.DataFrame: + return self.data.select(pl.col(col) for col in columns).filter(pl.col("sid").is_in(sid)) + def get_data_by_date_and_sids(self, fields: frozenset[str], + start_date: datetime.datetime, + end_date: datetime.datetime, + frequency: datetime.timedelta | Period, + sids: frozenset[int], + include_bounds: bool, + ) -> pl.DataFrame: + + frequency_td = period_to_timedelta(frequency) + sids_list = list(sids) + asset_sid = sids_list[0] + + if end_date > self.end_date: + raise ValueError(f"Requested end date {end_date} is greater than end date {self.end_date} of the bundle.") + if start_date < self.start_date: + raise ValueError(f"Requested start date {start_date} is lower than start date {self.start_date} of the bundle.") + + df = self.get_dataframe() + if fields is None: + fields = frozenset(df.columns) + cols = list(fields.union({"date", "sid"})) + + if include_bounds: + if len(sids) == 1: + sid_index = self.sid_indexes[asset_sid] + df_raw = self.get_dataframe()[sid_index[0]:sid_index[1]].select(pl.col(col) for col in cols).filter( + pl.col("date") >= start_date, + pl.col("date") <= end_date, + ) + else: + df_raw = self.get_dataframe().select(pl.col(col) for col in cols).filter( + pl.col("date") >= start_date, + pl.col("date") <= end_date, + pl.col("sid").is_in(sids_list) + ).sort(by=["sid", "date"]) + else: + if len(sids) == 1: + sid_index = self.sid_indexes[asset_sid] + df_raw = self.get_dataframe()[sid_index[0]:sid_index[1]].select(pl.col(col) for col in cols).filter( + pl.col("date") > start_date, + pl.col("date") < end_date, + ).sort(by="date") + + else: + df_raw = 
self.get_dataframe().select(pl.col(col) for col in cols).filter( + pl.col("date") > start_date, + pl.col("date") < end_date, + pl.col("sid").is_in(sids_list)).sort(by=["sid", "date"]) + + if self.frequency_td < frequency_td: + df = df_raw.group_by_dynamic( + index_column="date", every=frequency, by="sid").agg(pl.col(field).last() for field in fields) + return df + return df_raw def get_data_by_date(self, fields: frozenset[str], from_date: datetime.datetime, @@ -65,7 +120,9 @@ def get_data_by_date(self, fields: frozenset[str], assets: frozenset[Asset], include_bounds: bool, ) -> pl.DataFrame: - + return self.get_data_by_date_and_sids(fields=fields, from_date=from_date, + to_date=to_date, frequency=frequency, + sids=frozenset(asset.sid for asset in assets), include_bounds=include_bounds) cols = set(fields.union({"date", "sid"})) if include_bounds: asset_sid = [asset.sid for asset in assets][0] diff --git a/ziplime/data/services/data_source.py b/ziplime/data/services/data_source.py index 4c7ac12ebb..bcb6c17b42 100644 --- a/ziplime/data/services/data_source.py +++ b/ziplime/data/services/data_source.py @@ -179,3 +179,12 @@ def get_spot_value(self, assets: frozenset[Asset], fields: frozenset[str], dt: d include_end_date=True, ) return df_raw + + def get_data_by_date_and_sids(self, fields: frozenset[str], + start_date: datetime.datetime, + end_date: datetime.datetime, + frequency: datetime.timedelta | Period, + sids: frozenset[int], + include_bounds: bool, + ) -> pl.DataFrame: + ... 
diff --git a/ziplime/exchanges/exchange.py b/ziplime/exchanges/exchange.py index 1ba358c9ae..b359dfd148 100644 --- a/ziplime/exchanges/exchange.py +++ b/ziplime/exchanges/exchange.py @@ -22,13 +22,13 @@ class Exchange(DataSource): def __init__(self, name: str, canonical_name: str, country_code: str, clock: TradingClock, trading_calendar: ExchangeCalendar, - data_bundle: DataBundle | None = None): + data_source: DataBundle | None = None): self.name = name self.canonical_name = canonical_name self.country_code = country_code self.clock = clock self.trading_calendar = trading_calendar - self.data_bundle = data_bundle + self.data_source = data_source def get_start_cash_balance(self): pass diff --git a/ziplime/exchanges/simulation_exchange.py b/ziplime/exchanges/simulation_exchange.py index b7f673e170..96c0520dfa 100644 --- a/ziplime/exchanges/simulation_exchange.py +++ b/ziplime/exchanges/simulation_exchange.py @@ -10,7 +10,7 @@ from ziplime.assets.entities.equity import Equity from ziplime.assets.entities.futures_contract import FuturesContract from ziplime.constants.period import Period -from ziplime.data.domain.data_bundle import DataBundle +from ziplime.data.services.data_source import DataSource from ziplime.domain.position import Position from ziplime.domain.portfolio import Portfolio @@ -34,13 +34,13 @@ def __init__(self, future_slippage: SlippageModel, equity_commission: EquityCommissionModel, future_commission: FutureCommissionModel, - data_bundle: DataBundle = None, + data_source: DataSource = None, extra_data_sources: list = None ): super().__init__(name=name, canonical_name=name, clock=clock, - data_bundle=data_bundle, + data_source=data_source, country_code=country_code, trading_calendar=trading_calendar) self.slippage_models = { @@ -204,18 +204,18 @@ def current(self, assets: frozenset[Asset], fields: frozenset[str], dt: datetime # print(f"Getting current: {assets}, fields={fields}, dt={dt}") # TODO: check this, uncomment adjust_minutes # if not 
self._adjust_minutes: - # return self.data_bundle.get_spot_value( + # return self.data_source.get_spot_value( # assets=assets, # fields=fields, # dt=dt, - # frequency=self.data_bundle.frequency + # frequency=self.data_source.frequency # ) return self.get_data_by_limit( fields=fields, limit=1, end_date=dt, - frequency=self.data_bundle.frequency, + frequency=self.data_source.frequency, assets=assets, include_end_date=True, ) @@ -231,7 +231,7 @@ def get_data_by_period(self, include_end_date: bool, source: str ) -> pl.DataFrame: - return self.data_bundle.get_data_by_limit(fields=fields, + return self.data_source.get_data_by_limit(fields=fields, limit=limit, end_date=end_date, frequency=frequency, @@ -247,7 +247,7 @@ def get_data_by_limit(self, fields: frozenset[str], assets: frozenset[Asset], include_end_date: bool, ) -> pl.DataFrame: - return self.data_bundle.get_data_by_limit(fields=fields, + return self.data_source.get_data_by_limit(fields=fields, limit=limit, end_date=end_date, frequency=frequency, diff --git a/ziplime/finance/domain/ledger.py b/ziplime/finance/domain/ledger.py index 467bd4232e..e35fe9401b 100644 --- a/ziplime/finance/domain/ledger.py +++ b/ziplime/finance/domain/ledger.py @@ -289,7 +289,7 @@ def close_position(self, asset: Asset, dt: datetime.datetime): if txn is not None: self.process_transaction(transaction=txn) - def process_dividends(self, next_session, adjustment_reader): + async def process_dividends(self, next_session, adjustment_reader): """Process dividends for the next session. 
This will earn us any dividends whose ex-date is the next session as @@ -303,17 +303,17 @@ def process_dividends(self, next_session, adjustment_reader): held_sids = set(position_tracker.positions) if held_sids: cash_dividends = adjustment_reader.get_dividends_with_ex_date( - held_sids, next_session, None # self.data_bundle.asset_repository + assets=held_sids, date=next_session # self.data_bundle.asset_repository ) - stock_dividends = adjustment_reader.get_stock_dividends_with_ex_date( - held_sids, next_session, None # self.data_bundle.asset_repository + stock_dividends = await adjustment_reader.get_stock_dividends_with_ex_date( + assets=held_sids, date=next_session # self.data_bundle.asset_repository ) # Earning a dividend just marks that we need to get paid out on # the dividend's pay-date. This does not affect our cash yet. position_tracker.earn_dividends( - cash_dividends, - stock_dividends, + cash_dividends=cash_dividends, + stock_dividends=stock_dividends, ) # Pay out the dividends whose pay-date is the next session. This does diff --git a/ziplime/finance/metrics_tracker.py b/ziplime/finance/metrics_tracker.py index 4e985ce4b6..6414ddb1cf 100644 --- a/ziplime/finance/metrics_tracker.py +++ b/ziplime/finance/metrics_tracker.py @@ -144,7 +144,7 @@ def handle_minute_close(self, dt: datetime.datetime): ) return packet - def handle_market_open(self, session_label: datetime.datetime) -> None: + async def handle_market_open(self, session_label: datetime.datetime) -> None: """Handles the start of each session. 
Parameters @@ -158,7 +158,7 @@ def handle_market_open(self, session_label: datetime.datetime) -> None: adjustment_reader = self.asset_service._adjustments_repository if adjustment_reader is not None: # this is None when running with a dataframe source - self._ledger.process_dividends( + await self._ledger.process_dividends( next_session=session_label, adjustment_reader=adjustment_reader, ) diff --git a/ziplime/gens/domain/single_execution_clock.py b/ziplime/gens/domain/single_execution_clock.py new file mode 100644 index 0000000000..d3a6390b26 --- /dev/null +++ b/ziplime/gens/domain/single_execution_clock.py @@ -0,0 +1,102 @@ +import datetime + +import polars as pl +from exchange_calendars import ExchangeCalendar + +from ziplime.trading.enums.simulation_event import SimulationEvent +from ziplime.gens.domain.trading_clock import TradingClock + + +class SingleExecutionClock(TradingClock): + def __init__(self, + start_date: datetime.datetime, + end_date: datetime.datetime, + trading_calendar: ExchangeCalendar, + emission_rate: datetime.timedelta, + execute_on_period_end_bool: bool = True): + super().__init__(trading_calendar=trading_calendar, emission_rate=emission_rate) + if start_date >= end_date: + raise ValueError("Period start falls after period end.") + if start_date >= trading_calendar.last_session.replace(tzinfo=trading_calendar.tz): + raise ValueError("Period start falls after the last known trading day.") + if end_date <= trading_calendar.first_session.replace(tzinfo=trading_calendar.tz): + raise ValueError("Period end falls before the first known trading day.") + + # chop off any minutes or hours on the given start and end dates, + # as we only support session labels here (and we represent session + # labels as midnight UTC). 
+ + self.start_date = start_date + self.end_date = end_date + + self.start_session = start_date.date() + self.end_session = end_date.date() + + if trading_calendar.sessions_distance(self.start_session, self.end_session) < 1: + raise Exception( + f"There are no trading days between {self.start_session} and {self.end_session}") + if not trading_calendar.is_session(self.start_session): + # if the start date is not a valid session in this calendar, + # push it forward to the first valid session + self.start_session = trading_calendar.minute_to_session( + self.start_session + ).tz_localize(self.trading_calendar.tz).to_pydatetime().date() + + if not trading_calendar.is_session(self.end_session): + # if the end date is not a valid session in this calendar, + # pull it backward to the last valid session before the given + # end date. + self.end_session = trading_calendar.minute_to_session( + self.end_session, direction="previous" + ).tz_localize(self.trading_calendar.tz).to_pydatetime().date() + + self.first_open = trading_calendar.session_first_minute( + self.start_session + ).tz_convert(self.trading_calendar.tz).to_pydatetime() + self.last_close = trading_calendar.session_close( + self.end_session + ).tz_convert(self.trading_calendar.tz).to_pydatetime() + + self.sessions = pl.Series(self.trading_calendar.sessions_in_range( + self.start_session, self.end_session) + ).dt.date() + + self.market_closes = pl.Series( + self.trading_calendar.schedule.loc[self.sessions, "close"].dt.tz_convert( + self.trading_calendar.tz)) + self.market_opens = pl.Series( + self.trading_calendar.first_minutes.loc[self.sessions].dt.tz_convert( + self.trading_calendar.tz)) + + self.before_trading_start_minutes = self.market_opens - datetime.timedelta(minutes=46) + self.minutes_by_session = self.calc_minutes_by_session() + self.execute_on_period_end_bool=execute_on_period_end_bool + + def calc_minutes_by_session(self): + minutes_by_session_n = {} + if self.emission_rate < datetime.timedelta(days=1): 
+ for session_idx, session in enumerate(self.sessions): + minutes = pl.datetime_range(self.market_opens[session_idx], self.market_closes[session_idx], + interval=self.emission_rate, + eager=True) + minutes_by_session_n[session] = minutes + else: + minutes_by_session_n = {session: pl.Series([self.market_closes[session_idx]]) for + session_idx, session in enumerate(self.sessions)} + return minutes_by_session_n + + def __iter__(self): + if self.execute_on_period_end_bool: + bts_minute = self.before_trading_start_minutes[-1] + session = self.sessions[-1] + minute = self.minutes_by_session[session][-1] + else: + bts_minute = self.before_trading_start_minutes[0] + session = self.sessions[0] + minute = self.minutes_by_session[session][0] + + yield session, SimulationEvent.SESSION_START + yield bts_minute, SimulationEvent.BEFORE_TRADING_START_BAR + yield minute, SimulationEvent.BAR + yield minute, SimulationEvent.EMISSION_RATE_END + yield minute, SimulationEvent.SESSION_END diff --git a/ziplime/lib/adjusted_array.py b/ziplime/lib/adjusted_array.py index e96917c90c..a07cdbba81 100644 --- a/ziplime/lib/adjusted_array.py +++ b/ziplime/lib/adjusted_array.py @@ -1,8 +1,5 @@ -from textwrap import dedent -from functools import partial import numpy as np -from toolz import merge_with from ziplime.errors import ( WindowLengthNotPositive, WindowLengthTooLong, @@ -114,46 +111,6 @@ def _normalize_array(data, missing_value): ) -def _merge_simple(adjustment_lists, front_idx, back_idx): - """ - Merge lists of new and existing adjustments for a given index by appending - or prepending new adjustments to existing adjustments. - - Notes - ----- - This method is meant to be used with ``toolz.merge_with`` to merge - adjustment mappings. In case of a collision ``adjustment_lists`` contains - two lists, existing adjustments at index 0 and new adjustments at index 1. - When there are no collisions, ``adjustment_lists`` contains a single list. 
- - Parameters - ---------- - adjustment_lists : list[list[Adjustment]] - List(s) of new and/or existing adjustments for a given index. - front_idx : int - Index of list in ``adjustment_lists`` that should be used as baseline - in case of a collision. - back_idx : int - Index of list in ``adjustment_lists`` that should extend baseline list - in case of a collision. - - Returns - ------- - adjustments : list[Adjustment] - List of merged adjustments for a given index. - """ - if len(adjustment_lists) == 1: - return list(adjustment_lists[0]) - else: - return adjustment_lists[front_idx] + adjustment_lists[back_idx] - - -_merge_methods = { - "append": partial(_merge_simple, front_idx=0, back_idx=1), - "prepend": partial(_merge_simple, front_idx=1, back_idx=0), -} - - class AdjustedArray: """ An array that can be iterated with a variable-length window, and which can @@ -199,35 +156,6 @@ def copy(self): self.missing_value, ) - def update_adjustments(self, adjustments, method): - """ - Merge ``adjustments`` with existing adjustments, handling index - collisions according to ``method``. - - Parameters - ---------- - adjustments : dict[int -> list[Adjustment]] - The mapping of row indices to lists of adjustments that should be - appended to existing adjustments. - method : {'append', 'prepend'} - How to handle index collisions. If 'append', new adjustments will - be applied after previously-existing adjustments. If 'prepend', new - adjustments will be applied before previously-existing adjustments. 
- """ - try: - merge_func = _merge_methods[method] - except KeyError as exc: - raise ValueError( - "Invalid merge method %s\n" - "Valid methods are: %s" % (method, ", ".join(_merge_methods)) - ) from exc - - self.adjustments = merge_with( - merge_func, - self.adjustments, - adjustments, - ) - @property def data(self): """ @@ -291,45 +219,6 @@ def traverse(self, window_length, offset=0, perspective_offset=0, copy=True): rounding_places=None, ) - def inspect(self): - """ - Return a string representation of the data stored in this array. - """ - return dedent( - """\ - Adjusted Array ({dtype}): - - Data: - {data!r} - - Adjustments: - {adjustments} - """ - ).format( - dtype=self.dtype.name, - data=self.data, - adjustments=self.adjustments, - ) - - def update_labels(self, func): - """ - Map a function over baseline and adjustment values in place. - - Note that the baseline data values must be a LabelArray. - """ - if not isinstance(self.data, LabelArray): - raise TypeError( - "update_labels only supported if data is of type LabelArray." - ) - - # Map the baseline values. - self._data = self._data.map(func) - - # Map each of the adjustments. 
- for _, row_adjustments in self.adjustments.items(): - for adjustment in row_adjustments: - adjustment.value = func(adjustment.value) - def ensure_adjusted_array(ndarray_or_adjusted_array, missing_value): if isinstance(ndarray_or_adjusted_array, AdjustedArray): diff --git a/ziplime/pipeline/__init__.py b/ziplime/pipeline/__init__.py index 9c7f978a94..e5e962a648 100644 --- a/ziplime/pipeline/__init__.py +++ b/ziplime/pipeline/__init__.py @@ -1,8 +1,13 @@ -from .classifiers import Classifier, CustomClassifier +from .terms.classifiers.custom_classifier import CustomClassifier +from .terms.classifiers.classifier import Classifier from .domain import Domain -from .factors import Factor, CustomFactor -from .filters import Filter, CustomFilter -from .term import Term, LoadableTerm, ComputableTerm +from .terms.computable_term import ComputableTerm +from .terms.factors import Factor, CustomFactor +from .terms.filters import Filter, CustomFilter + +from .terms.loadable_term import LoadableTerm +from .terms.term import Term + from .graph import ExecutionPlan, TermGraph # NOTE: this needs to come after the import of `graph`, or else we get circular diff --git a/ziplime/pipeline/asset_calendar_domain.py b/ziplime/pipeline/asset_calendar_domain.py new file mode 100644 index 0000000000..e70675bfcc --- /dev/null +++ b/ziplime/pipeline/asset_calendar_domain.py @@ -0,0 +1,67 @@ +import datetime + +import numpy as np +import pandas as pd + +from ziplime.assets.entities.asset import Asset +from ziplime.pipeline import Domain +from ziplime.utils.calendar_utils import get_calendar + + +class AssetCalendarDomain(Domain): + """An equity domain whose sessions are defined by a named TradingCalendar. + + Parameters + ---------- + country_code : str + ISO-3166 two-letter country code of the domain + calendar_name : str + Name of the calendar, to be looked by by trading_calendar.get_calendar. 
+ data_query_offset : np.timedelta64 + The offset from market open when data should no longer be considered + available for a session. For example, a ``data_query_offset`` of + ``-np.timedelta64(45, 'm')`` means that the data must have + been available at least 45 minutes prior to market open for it to + appear in the pipeline input for the given session. + """ + + def __init__( + self, assets: list[Asset], calendar_name:str, data_query_offset=-np.timedelta64(45, "m") + ): + super().__init__() + self.calendar_name = calendar_name + self._data_query_offset = ( + # add one minute because `open_time` is actually the open minute + # label which is one minute _after_ market open... + data_query_offset + - np.timedelta64(1, "m") + ) + if data_query_offset >= datetime.timedelta(0): + raise ValueError( + "data must be ready before market open (offset must be < 0)", + ) + self._calendar = get_calendar(self.calendar_name) + self.assets = assets + + @property + def country_code(self): + return None + + @property + def calendar(self): + return self._calendar + + def sessions(self): + return self.calendar.sessions + + def data_query_cutoff_for_sessions(self, sessions): + opens = self.calendar.first_minutes.reindex(sessions) + missing_mask = pd.isnull(opens) + if missing_mask.any(): + missing_days = sessions[missing_mask] + raise ValueError( + "cannot resolve data query time for sessions that are not on" + f" the {self.calendar_name} calendar:\n{missing_days}" + ) + + return pd.DatetimeIndex(opens) + self._data_query_offset diff --git a/ziplime/pipeline/classifiers/__init__.py b/ziplime/pipeline/classifiers/__init__.py deleted file mode 100644 index 8e739fb7a6..0000000000 --- a/ziplime/pipeline/classifiers/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -from .classifier import ( - Classifier, - CustomClassifier, - Quantiles, - Everything, - Latest, -) - -__all__ = [ - "Classifier", - "CustomClassifier", - "Everything", - "Latest", - "Quantiles", -] diff --git 
a/ziplime/pipeline/common.py b/ziplime/pipeline/common.py index 9a859b3477..b0bfba243f 100644 --- a/ziplime/pipeline/common.py +++ b/ziplime/pipeline/common.py @@ -15,4 +15,25 @@ EVENT_DATE_FIELD_NAME = "event_date" SID_FIELD_NAME = "sid" + TS_FIELD_NAME = "timestamp" +INVALID_NUM_QTRS_MESSAGE = ( + "Passed invalid number of quarters %s; " "must pass a number of quarters >= 0" +) +NEXT_FISCAL_QUARTER = "next_fiscal_quarter" +NEXT_FISCAL_YEAR = "next_fiscal_year" +NORMALIZED_QUARTERS = "normalized_quarters" +PREVIOUS_FISCAL_QUARTER = "previous_fiscal_quarter" +PREVIOUS_FISCAL_YEAR = "previous_fiscal_year" +SHIFTED_NORMALIZED_QTRS = "shifted_normalized_quarters" +SIMULATION_DATES = "dates" +# These metadata columns are used to align event indexers. +metadata_columns = frozenset( + { + TS_FIELD_NAME, + SID_FIELD_NAME, + EVENT_DATE_FIELD_NAME, + FISCAL_QUARTER_FIELD_NAME, + FISCAL_YEAR_FIELD_NAME, + } +) diff --git a/ziplime/pipeline/data/__init__.py b/ziplime/pipeline/data/__init__.py index ab35583bb3..ac54cecd96 100644 --- a/ziplime/pipeline/data/__init__.py +++ b/ziplime/pipeline/data/__init__.py @@ -1,11 +1,9 @@ -from .equity_pricing import EquityPricing, USEquityPricing -from .dataset import ( - BoundColumn, - Column, - DataSet, - DataSetFamily, - DataSetFamilySlice, -) +from .column import Column +from .dataset import DataSet +from .dataset_family import DataSetFamily +from .dataset_family_slice import DataSetFamilySlice +from .equity_pricing import EquityPricing#, USEquityPricing +from ..terms.bound_column import BoundColumn __all__ = [ "BoundColumn", @@ -14,5 +12,5 @@ "EquityPricing", "DataSetFamily", "DataSetFamilySlice", - "USEquityPricing", + # "USEquityPricing", ] diff --git a/ziplime/pipeline/data/bound_column_descr.py b/ziplime/pipeline/data/bound_column_descr.py new file mode 100644 index 0000000000..ab0a19142d --- /dev/null +++ b/ziplime/pipeline/data/bound_column_descr.py @@ -0,0 +1,57 @@ +from ziplime.pipeline.terms.bound_column import BoundColumn 
+from ziplime.pipeline.terms.utils import validate_dtype +from ziplime.utils.numpy_utils import NoDefaultMissingValue + + +class BoundColumnDescr: + """ + Intermediate class that sits on `DataSet` objects and returns memoized + `BoundColumn` objects when requested. + + This exists so that subclasses of DataSets don't share columns with their + parent classes. + """ + + def __init__(self, dtype, missing_value, name, doc, metadata, currency_aware): + # Validating and calculating default missing values here guarantees + # that we fail quickly if the user passes an unsupporte dtype or fails + # to provide a missing value for a dtype that requires one + # (e.g. int64), but still enables us to provide an error message that + # points to the name of the failing column. + try: + self.dtype, self.missing_value = validate_dtype( + termname="Column(name={name!r})".format(name=name), + dtype=dtype, + missing_value=missing_value, + ) + except NoDefaultMissingValue as exc: + # Re-raise with a more specific message. + raise NoDefaultMissingValue( + "Failed to create Column with name {name!r} and" + " dtype {dtype} because no missing_value was provided\n\n" + "Columns with dtype {dtype} require a missing_value.\n" + "Please pass missing_value to Column() or use a different" + " dtype.".format(dtype=dtype, name=name) + ) from exc + self.name = name + self.doc = doc + self.metadata = metadata + self.currency_aware = currency_aware + + def __get__(self, instance, owner): + """ + Produce a concrete BoundColumn object when accessed. + + We don't bind to datasets at class creation time so that subclasses of + DataSets produce different BoundColumns. 
+ """ + return BoundColumn( + dtype=self.dtype, + missing_value=self.missing_value, + dataset=owner, + name=self.name, + doc=self.doc, + metadata=self.metadata, + currency_conversion=None, + currency_aware=self.currency_aware, + ) \ No newline at end of file diff --git a/ziplime/pipeline/data/column.py b/ziplime/pipeline/data/column.py new file mode 100644 index 0000000000..fad7006553 --- /dev/null +++ b/ziplime/pipeline/data/column.py @@ -0,0 +1,44 @@ +from ziplime.utils.numpy_utils import float64_dtype + + +class Column: + """ + An abstract column of data, not yet associated with a dataset. + """ + + def __init__( + self, + dtype, + missing_value=None, + doc=None, + metadata=None, + currency_aware=False, + ): + if currency_aware and dtype != float64_dtype: + raise ValueError( + "Columns cannot be constructed with currency_aware={}, " + "dtype={}. Currency aware columns must have a float64 dtype.".format( + currency_aware, dtype + ) + ) + + self.dtype = dtype + self.missing_value = missing_value + self.doc = doc + self.metadata = metadata.copy() if metadata is not None else {} + self.currency_aware = currency_aware + + def bind(self, name): + from ziplime.pipeline.data.bound_column_descr import BoundColumnDescr + + """ + Bind a `Column` object to its name. 
+ """ + return BoundColumnDescr( + dtype=self.dtype, + missing_value=self.missing_value, + name=name, + doc=self.doc, + metadata=self.metadata, + currency_aware=self.currency_aware, + ) diff --git a/ziplime/pipeline/data/constants.py b/ziplime/pipeline/data/constants.py new file mode 100644 index 0000000000..0b190e3500 --- /dev/null +++ b/ziplime/pipeline/data/constants.py @@ -0,0 +1 @@ +IsSpecialization = "IsSpecialization" diff --git a/ziplime/pipeline/data/dataset.py b/ziplime/pipeline/data/dataset.py index 838f9d85d7..9c50d8316b 100644 --- a/ziplime/pipeline/data/dataset.py +++ b/ziplime/pipeline/data/dataset.py @@ -1,543 +1,10 @@ -import abc -from collections import namedtuple, OrderedDict -from itertools import repeat -from textwrap import dedent -from weakref import WeakKeyDictionary +from collections import namedtuple +from ziplime.pipeline.data.bound_column_descr import BoundColumnDescr +from ziplime.pipeline.data.dataset_meta import DataSetMeta -from toolz import first -from ziplime.currency import Currency -from ziplime.data.fx import DEFAULT_FX_RATE - -from ziplime.pipeline.classifiers import Classifier, Latest as LatestClassifier -from ziplime.pipeline.domain import Domain, GENERIC -from ziplime.pipeline.factors import Factor, Latest as LatestFactor -from ziplime.pipeline.filters import Filter, Latest as LatestFilter -from ziplime.pipeline.term import ( - AssetExists, - LoadableTerm, - validate_dtype, -) -from ziplime.utils.formatting import s, plural -from ziplime.utils.numpy_utils import float64_dtype, NoDefaultMissingValue +from ziplime.pipeline.domain import GENERIC from ziplime.utils.string_formatting import bulleted_list -IsSpecialization = "IsSpecialization" - - -class Column: - """ - An abstract column of data, not yet associated with a dataset. 
- """ - - def __init__( - self, - dtype, - missing_value=None, - doc=None, - metadata=None, - currency_aware=False, - ): - if currency_aware and dtype != float64_dtype: - raise ValueError( - "Columns cannot be constructed with currency_aware={}, " - "dtype={}. Currency aware columns must have a float64 dtype.".format( - currency_aware, dtype - ) - ) - - self.dtype = dtype - self.missing_value = missing_value - self.doc = doc - self.metadata = metadata.copy() if metadata is not None else {} - self.currency_aware = currency_aware - - def bind(self, name): - """ - Bind a `Column` object to its name. - """ - return _BoundColumnDescr( - dtype=self.dtype, - missing_value=self.missing_value, - name=name, - doc=self.doc, - metadata=self.metadata, - currency_aware=self.currency_aware, - ) - - -class _BoundColumnDescr: - """ - Intermediate class that sits on `DataSet` objects and returns memoized - `BoundColumn` objects when requested. - - This exists so that subclasses of DataSets don't share columns with their - parent classes. - """ - - def __init__(self, dtype, missing_value, name, doc, metadata, currency_aware): - # Validating and calculating default missing values here guarantees - # that we fail quickly if the user passes an unsupporte dtype or fails - # to provide a missing value for a dtype that requires one - # (e.g. int64), but still enables us to provide an error message that - # points to the name of the failing column. - try: - self.dtype, self.missing_value = validate_dtype( - termname="Column(name={name!r})".format(name=name), - dtype=dtype, - missing_value=missing_value, - ) - except NoDefaultMissingValue as exc: - # Re-raise with a more specific message. 
- raise NoDefaultMissingValue( - "Failed to create Column with name {name!r} and" - " dtype {dtype} because no missing_value was provided\n\n" - "Columns with dtype {dtype} require a missing_value.\n" - "Please pass missing_value to Column() or use a different" - " dtype.".format(dtype=dtype, name=name) - ) from exc - self.name = name - self.doc = doc - self.metadata = metadata - self.currency_aware = currency_aware - - def __get__(self, instance, owner): - """ - Produce a concrete BoundColumn object when accessed. - - We don't bind to datasets at class creation time so that subclasses of - DataSets produce different BoundColumns. - """ - return BoundColumn( - dtype=self.dtype, - missing_value=self.missing_value, - dataset=owner, - name=self.name, - doc=self.doc, - metadata=self.metadata, - currency_conversion=None, - currency_aware=self.currency_aware, - ) - - -class BoundColumn(LoadableTerm): - """ - A column of data that's been concretely bound to a particular dataset. - - Attributes - ---------- - dtype : numpy.dtype - The dtype of data produced when this column is loaded. - latest : ziplime.pipeline.LoadableTerm - A :class:`~ziplime.pipeline.Filter`, :class:`~ziplime.pipeline.Factor`, - or :class:`~ziplime.pipeline.Classifier` computing the most recently - known value of this column on each date. - See :class:`ziplime.pipeline.mixins.LatestMixin` for more details. - dataset : ziplime.pipeline.data.DataSet - The dataset to which this column is bound. - name : str - The name of this column. - metadata : dict - Extra metadata associated with this column. - currency_aware : bool - Whether or not this column produces currency-denominated data. - - Notes - ----- - Instances of this class are dynamically created upon access to attributes - of :class:`~ziplime.pipeline.data.DataSet`. For example, - :attr:`~ziplime.pipeline.data.EquityPricing.close` is an instance of this - class. Pipeline API users should never construct instances of this - directly. 
- """ - - mask = AssetExists() - window_safe = True - - def __new__( - cls, - dtype, - missing_value, - dataset, - name, - doc, - metadata, - currency_conversion, - currency_aware, - ): - if currency_aware and dtype != float64_dtype: - raise AssertionError( - "The {} column on dataset {} cannot be constructed with " - "currency_aware={}, dtype={}. Currency aware columns must " - "have a float64 dtype.".format( - name, - dataset, - currency_aware, - dtype, - ) - ) - - return super(BoundColumn, cls).__new__( - cls, - domain=dataset.domain, - dtype=dtype, - missing_value=missing_value, - dataset=dataset, - name=name, - ndim=dataset.ndim, - doc=doc, - metadata=metadata, - currency_conversion=currency_conversion, - currency_aware=currency_aware, - ) - - def _init( - self, - dataset, - name, - doc, - metadata, - currency_conversion, - currency_aware, - *args, - **kwargs, - ): - self._dataset = dataset - self._name = name - self.__doc__ = doc - self._metadata = metadata - self._currency_conversion = currency_conversion - self._currency_aware = currency_aware - return super(BoundColumn, self)._init(*args, **kwargs) - - @classmethod - def _static_identity( - cls, - dataset, - name, - doc, - metadata, - currency_conversion, - currency_aware, - *args, - **kwargs, - ): - return ( - super(BoundColumn, cls)._static_identity(*args, **kwargs), - dataset, - name, - doc, - frozenset(sorted(metadata.items(), key=first)), - currency_conversion, - currency_aware, - ) - - def __lt__(self, other): - msg = "Can't compare '{}' with '{}'. 
(Did you mean to use '.latest'?)" - raise TypeError(msg.format(self.qualname, other.__class__.__name__)) - - __gt__ = __le__ = __ge__ = __lt__ - - def _replace(self, **kwargs): - kw = dict( - dtype=self.dtype, - missing_value=self.missing_value, - dataset=self._dataset, - name=self._name, - doc=self.__doc__, - metadata=self._metadata, - currency_conversion=self._currency_conversion, - currency_aware=self._currency_aware, - ) - kw.update(kwargs) - - return type(self)(**kw) - - def specialize(self, domain): - """Specialize ``self`` to a concrete domain.""" - if domain == self.domain: - return self - - return self._replace(dataset=self._dataset.specialize(domain)) - - def unspecialize(self): - """ - Unspecialize a column to its generic form. - - This is equivalent to ``column.specialize(GENERIC)``. - """ - return self.specialize(GENERIC) - - def fx(self, currency: str | Currency): - """ - Construct a currency-converted version of this column. - - Parameters - ---------- - currency : str or ziplime.currency.Currency - Currency into which to convert this column's data. - - Returns - ------- - column : BoundColumn - Column producing the same data as ``self``, but currency-converted - into ``currency``. - """ - conversion = self._currency_conversion - - if not self._currency_aware: - raise TypeError( - "The .fx() method cannot be called on {} because it does not " - "produce currency-denominated data.".format(self.qualname) - ) - elif conversion is not None and conversion.currency == currency: - return self - - return self._replace( - currency_conversion=CurrencyConversion( - currency=currency, - field=DEFAULT_FX_RATE, - ) - ) - - @property - def currency_conversion(self): - """Specification for currency conversions applied for this term.""" - return self._currency_conversion - - @property - def currency_aware(self): - """ - Whether or not this column produces currency-denominated data. 
- """ - return self._currency_aware - - @property - def dataset(self): - """ - The dataset to which this column is bound. - """ - return self._dataset - - @property - def name(self): - """ - The name of this column. - """ - return self._name - - @property - def metadata(self): - """ - A copy of the metadata for this column. - """ - return self._metadata.copy() - - @property - def qualname(self): - """The fully-qualified name of this column.""" - out = ".".join([self.dataset.qualname, self.name]) - conversion = self._currency_conversion - if conversion is not None: - out += ".fx({!r})".format(conversion.currency.code) - return out - - @property - def latest(self): - dtype = self.dtype - if dtype in Filter.ALLOWED_DTYPES: - Latest = LatestFilter - elif dtype in Classifier.ALLOWED_DTYPES: - Latest = LatestClassifier - else: - assert dtype in Factor.ALLOWED_DTYPES, "Unknown dtype %s." % dtype - Latest = LatestFactor - - return Latest( - inputs=(self,), - dtype=dtype, - missing_value=self.missing_value, - ndim=self.ndim, - ) - - def __repr__(self): - return "{qualname}::{dtype}".format( - qualname=self.qualname, - dtype=self.dtype.name, - ) - - def graph_repr(self): - """Short repr to use when rendering Pipeline graphs.""" - # Graphviz interprets `\l` as "divide label into lines, left-justified" - return "BoundColumn:\\l Dataset: {}\\l Column: {}\\l".format( - self.dataset.__name__, self.name - ) - - def recursive_repr(self): - """Short repr used to render in recursive contexts.""" - return self.qualname - - -class DataSetMeta(type): - """ - Metaclass for DataSets - - Supplies name and dataset information to Column attributes, and manages - families of specialized dataset. - """ - - def __new__(metacls, name, bases, dict_): - if len(bases) != 1: - # Disallowing multiple inheritance makes it easier for us to - # determine whether a given dataset is the root for its family of - # specializations. 
- raise TypeError("Multiple dataset inheritance is not supported.") - - # This marker is set in the class dictionary by `specialize` below. - is_specialization = dict_.pop(IsSpecialization, False) - - newtype = super(DataSetMeta, metacls).__new__(metacls, name, bases, dict_) - - if not isinstance(newtype.domain, Domain): - raise TypeError( - "Expected a Domain for {}.domain, but got {} instead.".format( - newtype.__name__, - type(newtype.domain), - ) - ) - - # Collect all of the column names that we inherit from our parents. - column_names = set().union( - *(getattr(base, "_column_names", ()) for base in bases) - ) - - # Collect any new columns from this dataset. - for maybe_colname, maybe_column in dict_.items(): - if isinstance(maybe_column, Column): - # add column names defined on our class - bound_column_descr = maybe_column.bind(maybe_colname) - setattr(newtype, maybe_colname, bound_column_descr) - column_names.add(maybe_colname) - - newtype._column_names = frozenset(column_names) - - if not is_specialization: - # This is the new root of a family of specializations. Store the - # memoized dictionary for family on this type. - newtype._domain_specializations = WeakKeyDictionary( - { - newtype.domain: newtype, - } - ) - - return newtype - - def specialize(cls, domain: Domain): - """ - Specialize a generic DataSet to a concrete domain. - - Parameters - ---------- - domain : ziplime.pipeline.domain.Domain - Domain to which we should generate a specialization. - - Returns - ------- - specialized : type - A new :class:`~ziplime.pipeline.data.DataSet` subclass with the - same columns as ``self``, but specialized to ``domain``. - """ - # We're already the specialization to this domain, so just return self. 
- if domain == cls.domain: - return cls - - try: - return cls._domain_specializations[domain] - except KeyError as exc: - if not cls._can_create_new_specialization(domain): - # This either means we're already a specialization and trying - # to create a new specialization, or we're the generic version - # of a root-specialized dataset, which we don't want to create - # new specializations of. - raise ValueError( - "Can't specialize {dataset} from {current} to new domain {new}.".format( - dataset=cls.__name__, - current=cls.domain, - new=domain, - ) - ) from exc - new_type = cls._create_specialization(domain) - cls._domain_specializations[domain] = new_type - return new_type - - def unspecialize(cls): - """ - Unspecialize a dataset to its generic form. - - This is equivalent to ``dataset.specialize(GENERIC)``. - """ - return cls.specialize(GENERIC) - - def _can_create_new_specialization(cls, domain): - # Always allow specializing to a generic domain. - if domain is GENERIC: - return True - elif "_domain_specializations" in vars(cls): - # This branch is True if we're the root of a family. - # Allow specialization if we're generic. - return cls.domain is GENERIC - else: - # If we're not the root of a family, we can't create any new - # specializations. - return False - - def _create_specialization(cls, domain): - # These are all assertions because we should have handled these cases - # already in specialize(). - assert isinstance(domain, Domain) - assert ( - domain not in cls._domain_specializations - ), "Domain specializations should be memoized!" - if domain is not GENERIC: - assert ( - cls.domain is GENERIC - ), "Can't specialize dataset with domain {} to domain {}.".format( - cls.domain, - domain, - ) - - # Create a new subclass of ``self`` with the given domain. - # Mark that it's a specialization so that we know not to create a new - # family for it. 
-        name = cls.__name__
-        bases = (cls,)
-        dict_ = {"domain": domain, IsSpecialization: True}
-        out = type(name, bases, dict_)
-        out.__module__ = cls.__module__
-        return out
-
-    @property
-    def columns(cls):
-        return frozenset(getattr(cls, colname) for colname in cls._column_names)
-
-    @property
-    def qualname(cls):
-        if cls.domain is GENERIC:
-            specialization_key = ""
-        else:
-            specialization_key = "<" + cls.domain.country_code + ">"
-
-        return cls.__name__ + specialization_key
-
-    # NOTE: We used to use `functools.total_ordering` to account for all of the
-    #       other rich comparison methods, but it has issues in python 3 and
-    #       this method is only used for test purposes, so for now we will just
-    #       keep this in isolation. If we ever need any of the other comparison
-    #       methods we will have to implement them individually.
-    def __lt__(cls, other):
-        return id(cls) < id(other)
-
-    def __repr__(cls):
-        return "<DataSet: %r, domain=%s>" % (cls.__name__, cls.domain)
 
 
 class DataSet(object, metaclass=DataSetMeta):
@@ -643,7 +110,7 @@ def get_column(cls, name):
         clsdict = vars(cls)
         try:
             maybe_column = clsdict[name]
-            if not isinstance(maybe_column, _BoundColumnDescr):
+            if not isinstance(maybe_column, BoundColumnDescr):
                 raise KeyError(name)
         except KeyError as exc:
             raise AttributeError(
@@ -669,323 +136,6 @@ def get_column(cls, name):
     # version of this attribute if we fail to set this in a subclass somewhere.
     del DataSet._domain_specializations
 
-
-class DataSetFamilyLookupError(AttributeError):
-    """Exception thrown when a column is accessed on a DataSetFamily
-    instead of on the result of a slice.
-
-    Parameters
-    ----------
-    family_name : str
-        The name of the DataSetFamily on which the access occurred.
-    column_name : str
-        The name of the column accessed.
- """ - - def __init__(self, family_name, column_name): - self.family_name = family_name - self.column_name = column_name - - def __str__(self): - # NOTE: when ``aggregate`` is added, remember to update this message - return dedent( - """\ - Attempted to access column {c} from DataSetFamily {d}: - - To work with dataset families, you must first select a - slice using the ``slice`` method: - - {d}.slice(...).{c} - """.format( - c=self.column_name, d=self.family_name - ) - ) - - -class _DataSetFamilyColumn: - """Descriptor used to raise a helpful error when a column is accessed on a - DataSetFamily instead of on the result of a slice. - - Parameters - ---------- - column_names : str - The name of the column. - """ - - def __init__(self, column_name): - self.column_name = column_name - - def __get__(self, instance, owner): - raise DataSetFamilyLookupError( - owner.__name__, - self.column_name, - ) - - -class DataSetFamilyMeta(abc.ABCMeta): - def __new__(cls, name, bases, dict_): - columns = {} - for k, v in dict_.items(): - if isinstance(v, Column): - # capture all the columns off the DataSetFamily class - # and replace them with a descriptor that will raise a helpful - # error message. The columns will get added to the BaseSlice - # for this type. 
-                columns[k] = v
-                dict_[k] = _DataSetFamilyColumn(k)
-
-        is_abstract = dict_.pop("_abstract", False)
-
-        self = super(DataSetFamilyMeta, cls).__new__(
-            cls,
-            name,
-            bases,
-            dict_,
-        )
-
-        if not is_abstract:
-            self.extra_dims = extra_dims = OrderedDict(
-                [(k, frozenset(v)) for k, v in OrderedDict(self.extra_dims).items()]
-            )
-            if not extra_dims:
-                raise ValueError(
-                    "DataSetFamily must be defined with non-empty"
-                    " extra_dims, or with `_abstract = True`",
-                )
-
-            class BaseSlice(self._SliceType):
-                dataset_family = self
-
-                ndim = self.slice_ndim
-                domain = self.domain
-
-                locals().update(columns)
-
-            BaseSlice.__name__ = "%sBaseSlice" % self.__name__
-            self._SliceType = BaseSlice
-
-        # each type gets a unique cache
-        self._slice_cache = {}
-        return self
-
-    def __repr__(self):
-        return "<DataSetFamily: %r, extra_dims=%r>" % (
-            self.__name__,
-            list(self.extra_dims),
-        )
-
-
-class DataSetFamilySlice(DataSet):
-    """Marker type for slices of a
-    :class:`ziplime.pipeline.data.dataset.DataSetFamily` objects
-    """
-
-
-# XXX: This docstring was mostly written when the abstraction here was
-# "MultiDimensionalDataSet". It probably needs some rewriting.
-class DataSetFamily(metaclass=DataSetFamilyMeta):
-    """
-    Base class for Pipeline dataset families.
-
-    Dataset families are used to represent data where the unique identifier for
-    a row requires more than just asset and date coordinates. A
-    :class:`DataSetFamily` can also be thought of as a collection of
-    :class:`~ziplime.pipeline.data.DataSet` objects, each of which has the same
-    columns, domain, and ndim.
-
-    :class:`DataSetFamily` objects are defined with one or more
-    :class:`~ziplime.pipeline.data.Column` objects, plus one additional field:
-    ``extra_dims``.
-
-    The ``extra_dims`` field defines coordinates other than asset and date that
-    must be fixed to produce a logical timeseries. The column objects determine
-    columns that will be shared by slices of the family.
- - ``extra_dims`` are represented as an ordered dictionary where the keys are - the dimension name, and the values are a set of unique values along that - dimension. - - To work with a :class:`DataSetFamily` in a pipeline expression, one must - choose a specific value for each of the extra dimensions using the - :meth:`~ziplime.pipeline.data.DataSetFamily.slice` method. - For example, given a :class:`DataSetFamily`: - - .. code-block:: python - - class SomeDataSet(DataSetFamily): - extra_dims = [ - ('dimension_0', {'a', 'b', 'c'}), - ('dimension_1', {'d', 'e', 'f'}), - ] - - column_0 = Column(float) - column_1 = Column(bool) - - This dataset might represent a table with the following columns: - - :: - - sid :: int64 - asof_date :: datetime64[ns] - timestamp :: datetime64[ns] - dimension_0 :: str - dimension_1 :: str - column_0 :: float64 - column_1 :: bool - - Here we see the implicit ``sid``, ``asof_date`` and ``timestamp`` columns - as well as the extra dimensions columns. - - This :class:`DataSetFamily` can be converted to a regular :class:`DataSet` - with: - - .. code-block:: python - - DataSetSlice = SomeDataSet.slice(dimension_0='a', dimension_1='e') - - This sliced dataset represents the rows from the higher dimensional dataset - where ``(dimension_0 == 'a') & (dimension_1 == 'e')``. - """ - - _abstract = True # Removed by metaclass - - domain = GENERIC - slice_ndim = 2 - - _SliceType = DataSetFamilySlice - - @type.__call__ - class extra_dims: - """OrderedDict[str, frozenset] of dimension name -> unique values - - May be defined on subclasses as an iterable of pairs: the - metaclass converts this attribute to an OrderedDict. 
- """ - - __isabstractmethod__ = True - - def __get__(self, instance, owner): - return [] - - @classmethod - def _canonical_key(cls, args, kwargs): - extra_dims = cls.extra_dims - dimensions_set = set(extra_dims) - if not set(kwargs) <= dimensions_set: - extra = sorted(set(kwargs) - dimensions_set) - raise TypeError( - "%s does not have the following %s: %s\n" - "Valid dimensions are: %s" - % ( - cls.__name__, - s("dimension", extra), - ", ".join(extra), - ", ".join(extra_dims), - ), - ) - - if len(args) > len(extra_dims): - raise TypeError( - "%s has %d extra %s but %d %s given" - % ( - cls.__name__, - len(extra_dims), - s("dimension", extra_dims), - len(args), - plural("was", "were", args), - ), - ) - - missing = object() - coords = OrderedDict(zip(extra_dims, repeat(missing))) - to_add = dict(zip(extra_dims, args)) - coords.update(to_add) - added = set(to_add) - - for key, value in kwargs.items(): - if key in added: - raise TypeError( - "%s got multiple values for dimension %r" - % ( - cls.__name__, - coords, - ), - ) - coords[key] = value - added.add(key) - - missing = {k for k, v in coords.items() if v is missing} - if missing: - missing = sorted(missing) - raise TypeError( - "no coordinate provided to %s for the following %s: %s" - % ( - cls.__name__, - s("dimension", missing), - ", ".join(missing), - ), - ) - - # validate that all of the provided values exist along their given - # dimensions - for key, value in coords.items(): - if value not in cls.extra_dims[key]: - raise ValueError( - "%r is not a value along the %s dimension of %s" - % ( - value, - key, - cls.__name__, - ), - ) - - return coords, tuple(coords.items()) - - @classmethod - def _make_dataset(cls, coords): - """Construct a new dataset given the coordinates.""" - - class Slice(cls._SliceType): - extra_coords = coords - - Slice.__name__ = "%s.slice(%s)" % ( - cls.__name__, - ", ".join("%s=%r" % item for item in coords.items()), - ) - return Slice - - @classmethod - def slice(cls, *args, 
**kwargs): - """Take a slice of a DataSetFamily to produce a dataset - indexed by asset and date. - - Parameters - ---------- - *args - **kwargs - The coordinates to fix along each extra dimension. - - Returns - ------- - dataset : DataSet - A regular pipeline dataset indexed by asset and date. - - Notes - ----- - The extra dimensions coords used to produce the result are available - under the ``extra_coords`` attribute. - """ - coords, hash_key = cls._canonical_key(args, kwargs) - try: - return cls._slice_cache[hash_key] - except KeyError: - pass - - Slice = cls._make_dataset(coords) - cls._slice_cache[hash_key] = Slice - return Slice - - CurrencyConversion = namedtuple( "CurrencyConversion", ["currency", "field"], diff --git a/ziplime/pipeline/data/dataset_family.py b/ziplime/pipeline/data/dataset_family.py new file mode 100644 index 0000000000..3c4db8eae6 --- /dev/null +++ b/ziplime/pipeline/data/dataset_family.py @@ -0,0 +1,213 @@ +from itertools import repeat + +from collections import OrderedDict + +from ziplime.pipeline.data.dataset_family_meta import DataSetFamilyMeta +from ziplime.pipeline.data.dataset_family_slice import DataSetFamilySlice +from ziplime.pipeline.domain import GENERIC +from ziplime.utils.formatting import plural, s + + +# XXX: This docstring was mostly written when the abstraction here was +# "MultiDimensionalDataSet". It probably needs some rewriting. +class DataSetFamily(metaclass=DataSetFamilyMeta): + """ + Base class for Pipeline dataset families. + + Dataset families are used to represent data where the unique identifier for + a row requires more than just asset and date coordinates. A + :class:`DataSetFamily` can also be thought of as a collection of + :class:`~ziplime.pipeline.data.DataSet` objects, each of which has the same + columns, domain, and ndim. + + :class:`DataSetFamily` objects are defined with one or more + :class:`~ziplime.pipeline.data.Column` objects, plus one additional field: + ``extra_dims``. 
+ + The ``extra_dims`` field defines coordinates other than asset and date that + must be fixed to produce a logical timeseries. The column objects determine + columns that will be shared by slices of the family. + + ``extra_dims`` are represented as an ordered dictionary where the keys are + the dimension name, and the values are a set of unique values along that + dimension. + + To work with a :class:`DataSetFamily` in a pipeline expression, one must + choose a specific value for each of the extra dimensions using the + :meth:`~ziplime.pipeline.data.DataSetFamily.slice` method. + For example, given a :class:`DataSetFamily`: + + .. code-block:: python + + class SomeDataSet(DataSetFamily): + extra_dims = [ + ('dimension_0', {'a', 'b', 'c'}), + ('dimension_1', {'d', 'e', 'f'}), + ] + + column_0 = Column(float) + column_1 = Column(bool) + + This dataset might represent a table with the following columns: + + :: + + sid :: int64 + asof_date :: datetime64[ns] + timestamp :: datetime64[ns] + dimension_0 :: str + dimension_1 :: str + column_0 :: float64 + column_1 :: bool + + Here we see the implicit ``sid``, ``asof_date`` and ``timestamp`` columns + as well as the extra dimensions columns. + + This :class:`DataSetFamily` can be converted to a regular :class:`DataSet` + with: + + .. code-block:: python + + DataSetSlice = SomeDataSet.slice(dimension_0='a', dimension_1='e') + + This sliced dataset represents the rows from the higher dimensional dataset + where ``(dimension_0 == 'a') & (dimension_1 == 'e')``. + """ + + _abstract = True # Removed by metaclass + + domain = GENERIC + slice_ndim = 2 + + _SliceType = DataSetFamilySlice + + @type.__call__ + class extra_dims: + """OrderedDict[str, frozenset] of dimension name -> unique values + + May be defined on subclasses as an iterable of pairs: the + metaclass converts this attribute to an OrderedDict. 
+ """ + + __isabstractmethod__ = True + + def __get__(self, instance, owner): + return [] + + @classmethod + def _canonical_key(cls, args, kwargs): + extra_dims = cls.extra_dims + dimensions_set = set(extra_dims) + if not set(kwargs) <= dimensions_set: + extra = sorted(set(kwargs) - dimensions_set) + raise TypeError( + "%s does not have the following %s: %s\n" + "Valid dimensions are: %s" + % ( + cls.__name__, + s("dimension", extra), + ", ".join(extra), + ", ".join(extra_dims), + ), + ) + + if len(args) > len(extra_dims): + raise TypeError( + "%s has %d extra %s but %d %s given" + % ( + cls.__name__, + len(extra_dims), + s("dimension", extra_dims), + len(args), + plural("was", "were", args), + ), + ) + + missing = object() + coords = OrderedDict(zip(extra_dims, repeat(missing))) + to_add = dict(zip(extra_dims, args)) + coords.update(to_add) + added = set(to_add) + + for key, value in kwargs.items(): + if key in added: + raise TypeError( + "%s got multiple values for dimension %r" + % ( + cls.__name__, + coords, + ), + ) + coords[key] = value + added.add(key) + + missing = {k for k, v in coords.items() if v is missing} + if missing: + missing = sorted(missing) + raise TypeError( + "no coordinate provided to %s for the following %s: %s" + % ( + cls.__name__, + s("dimension", missing), + ", ".join(missing), + ), + ) + + # validate that all of the provided values exist along their given + # dimensions + for key, value in coords.items(): + if value not in cls.extra_dims[key]: + raise ValueError( + "%r is not a value along the %s dimension of %s" + % ( + value, + key, + cls.__name__, + ), + ) + + return coords, tuple(coords.items()) + + @classmethod + def _make_dataset(cls, coords): + """Construct a new dataset given the coordinates.""" + + class Slice(cls._SliceType): + extra_coords = coords + + Slice.__name__ = "%s.slice(%s)" % ( + cls.__name__, + ", ".join("%s=%r" % item for item in coords.items()), + ) + return Slice + + @classmethod + def slice(cls, *args, 
**kwargs): + """Take a slice of a DataSetFamily to produce a dataset + indexed by asset and date. + + Parameters + ---------- + *args + **kwargs + The coordinates to fix along each extra dimension. + + Returns + ------- + dataset : DataSet + A regular pipeline dataset indexed by asset and date. + + Notes + ----- + The extra dimensions coords used to produce the result are available + under the ``extra_coords`` attribute. + """ + coords, hash_key = cls._canonical_key(args, kwargs) + try: + return cls._slice_cache[hash_key] + except KeyError: + pass + + Slice = cls._make_dataset(coords) + cls._slice_cache[hash_key] = Slice + return Slice diff --git a/ziplime/pipeline/data/dataset_family_column.py b/ziplime/pipeline/data/dataset_family_column.py new file mode 100644 index 0000000000..0d938052e8 --- /dev/null +++ b/ziplime/pipeline/data/dataset_family_column.py @@ -0,0 +1,21 @@ +from ziplime.pipeline.errors.dataset_family_lookup_error import DataSetFamilyLookupError + + +class DataSetFamilyColumn: + """Descriptor used to raise a helpful error when a column is accessed on a + DataSetFamily instead of on the result of a slice. + + Parameters + ---------- + column_names : str + The name of the column. 
+ """ + + def __init__(self, column_name): + self.column_name = column_name + + def __get__(self, instance, owner): + raise DataSetFamilyLookupError( + owner.__name__, + self.column_name, + ) diff --git a/ziplime/pipeline/data/dataset_family_meta.py b/ziplime/pipeline/data/dataset_family_meta.py new file mode 100644 index 0000000000..1e5d781771 --- /dev/null +++ b/ziplime/pipeline/data/dataset_family_meta.py @@ -0,0 +1,59 @@ +import abc +from collections import OrderedDict + +from ziplime.pipeline.data import Column +from ziplime.pipeline.data.dataset_family_column import DataSetFamilyColumn + + +class DataSetFamilyMeta(abc.ABCMeta): + def __new__(cls, name, bases, dict_): + columns = {} + for k, v in dict_.items(): + if isinstance(v, Column): + # capture all the columns off the DataSetFamily class + # and replace them with a descriptor that will raise a helpful + # error message. The columns will get added to the BaseSlice + # for this type. + columns[k] = v + dict_[k] = DataSetFamilyColumn(k) + + is_abstract = dict_.pop("_abstract", False) + + self = super(DataSetFamilyMeta, cls).__new__( + cls, + name, + bases, + dict_, + ) + + if not is_abstract: + self.extra_dims = extra_dims = OrderedDict( + [(k, frozenset(v)) for k, v in OrderedDict(self.extra_dims).items()] + ) + if not extra_dims: + raise ValueError( + "DataSetFamily must be defined with non-empty" + " extra_dims, or with `_abstract = True`", + ) + + class BaseSlice(self._SliceType): + dataset_family = self + + ndim = self.slice_ndim + domain = self.domain + + locals().update(columns) + + BaseSlice.__name__ = "%sBaseSlice" % self.__name__ + self._SliceType = BaseSlice + + # each type gets a unique cache + self._slice_cache = {} + return self + + def __repr__(self): + return "" % ( + self.__name__, + list(self.extra_dims), + ) + diff --git a/ziplime/pipeline/data/dataset_family_slice.py b/ziplime/pipeline/data/dataset_family_slice.py new file mode 100644 index 0000000000..d64ae3add7 --- /dev/null +++ 
b/ziplime/pipeline/data/dataset_family_slice.py @@ -0,0 +1,8 @@ +from ziplime.pipeline.data import DataSet + + +class DataSetFamilySlice(DataSet): + """Marker type for slices of a + :class:`ziplime.pipeline.data.dataset.DataSetFamily` objects + """ + diff --git a/ziplime/pipeline/data/dataset_meta.py b/ziplime/pipeline/data/dataset_meta.py new file mode 100644 index 0000000000..da1fafa884 --- /dev/null +++ b/ziplime/pipeline/data/dataset_meta.py @@ -0,0 +1,170 @@ +from weakref import WeakKeyDictionary + +from ziplime.pipeline.data.column import Column +from ziplime.pipeline.data.constants import IsSpecialization +from ziplime.pipeline.domain import GENERIC, Domain + + +class DataSetMeta(type): + """ + Metaclass for DataSets + + Supplies name and dataset information to Column attributes, and manages + families of specialized dataset. + """ + + def __new__(metacls, name, bases, dict_): + if len(bases) != 1: + # Disallowing multiple inheritance makes it easier for us to + # determine whether a given dataset is the root for its family of + # specializations. + raise TypeError("Multiple dataset inheritance is not supported.") + + # This marker is set in the class dictionary by `specialize` below. + is_specialization = dict_.pop(IsSpecialization, False) + + newtype = super(DataSetMeta, metacls).__new__(metacls, name, bases, dict_) + + if not isinstance(newtype.domain, Domain): + raise TypeError( + "Expected a Domain for {}.domain, but got {} instead.".format( + newtype.__name__, + type(newtype.domain), + ) + ) + + # Collect all of the column names that we inherit from our parents. + column_names = set().union( + *(getattr(base, "_column_names", ()) for base in bases) + ) + + # Collect any new columns from this dataset. 
+ for maybe_colname, maybe_column in dict_.items(): + if isinstance(maybe_column, Column): + # add column names defined on our class + bound_column_descr = maybe_column.bind(maybe_colname) + setattr(newtype, maybe_colname, bound_column_descr) + column_names.add(maybe_colname) + + newtype._column_names = frozenset(column_names) + + if not is_specialization: + # This is the new root of a family of specializations. Store the + # memoized dictionary for family on this type. + newtype._domain_specializations = WeakKeyDictionary( + { + newtype.domain: newtype, + } + ) + + return newtype + + def specialize(cls, domain: Domain): + """ + Specialize a generic DataSet to a concrete domain. + + Parameters + ---------- + domain : ziplime.pipeline.domain.Domain + Domain to which we should generate a specialization. + + Returns + ------- + specialized : type + A new :class:`~ziplime.pipeline.data.DataSet` subclass with the + same columns as ``self``, but specialized to ``domain``. + """ + # We're already the specialization to this domain, so just return self. + if domain == cls.domain: + return cls + + try: + return cls._domain_specializations[domain] + except KeyError as exc: + if not cls._can_create_new_specialization(domain): + # This either means we're already a specialization and trying + # to create a new specialization, or we're the generic version + # of a root-specialized dataset, which we don't want to create + # new specializations of. + raise ValueError( + "Can't specialize {dataset} from {current} to new domain {new}.".format( + dataset=cls.__name__, + current=cls.domain, + new=domain, + ) + ) from exc + new_type = cls._create_specialization(domain) + cls._domain_specializations[domain] = new_type + return new_type + + def unspecialize(cls): + """ + Unspecialize a dataset to its generic form. + + This is equivalent to ``dataset.specialize(GENERIC)``. 
+ """ + return cls.specialize(GENERIC) + + def _can_create_new_specialization(cls, domain): + # Always allow specializing to a generic domain. + if domain is GENERIC: + return True + elif "_domain_specializations" in vars(cls): + # This branch is True if we're the root of a family. + # Allow specialization if we're generic. + return cls.domain is GENERIC + else: + # If we're not the root of a family, we can't create any new + # specializations. + return False + + def _create_specialization(cls, domain): + # These are all assertions because we should have handled these cases + # already in specialize(). + assert isinstance(domain, Domain) + assert ( + domain not in cls._domain_specializations + ), "Domain specializations should be memoized!" + if domain is not GENERIC: + assert ( + cls.domain is GENERIC + ), "Can't specialize dataset with domain {} to domain {}.".format( + cls.domain, + domain, + ) + + # Create a new subclass of ``self`` with the given domain. + # Mark that it's a specialization so that we know not to create a new + # family for it. + name = cls.__name__ + bases = (cls,) + dict_ = {"domain": domain, IsSpecialization: True} + out = type(name, bases, dict_) + out.__module__ = cls.__module__ + return out + + @property + def columns(cls): + return frozenset(getattr(cls, colname) for colname in cls._column_names) + + @property + def qualname(cls): + if cls.domain is GENERIC: + specialization_key = "" + elif cls.domain.assets: + specialization_key = f"sids<{[asset.sid for asset in cls.domain.assets]}>" + else: + specialization_key = "<" + cls.domain.country_code + ">" + + return cls.__name__ + specialization_key + + # NOTE: We used to use `functools.total_ordering` to account for all of the + # other rich comparison methods, but it has issues in python 3 and + # this method is only used for test purposes, so for now we will just + # keep this in isolation. If we ever need any of the other comparison + # methods we will have to implement them individually. 
+ def __lt__(cls, other): + return id(cls) < id(other) + + def __repr__(cls): + return "<DataSet: %r, domain=%s>" % (cls.__name__, cls.domain) diff --git a/ziplime/pipeline/data/equity_pricing.py b/ziplime/pipeline/data/equity_pricing.py index cd19691d51..e740110c8d 100644 --- a/ziplime/pipeline/data/equity_pricing.py +++ b/ziplime/pipeline/data/equity_pricing.py @@ -1,11 +1,9 @@ """ Dataset representing OHLCV data. """ - +from ziplime.pipeline.data import DataSet, Column from ziplime.utils.numpy_utils import float64_dtype, categorical_dtype -from ..domain import US_EQUITIES -from .dataset import Column, DataSet class EquityPricing(DataSet): @@ -20,7 +18,3 @@ class EquityPricing(DataSet): close = Column(float64_dtype, currency_aware=True) volume = Column(float64_dtype) currency = Column(categorical_dtype) - - -# Backwards compat alias. -USEquityPricing = EquityPricing.specialize(US_EQUITIES) diff --git a/ziplime/pipeline/domain.py b/ziplime/pipeline/domain.py index ca9d279567..004bf12244 100644 --- a/ziplime/pipeline/domain.py +++ b/ziplime/pipeline/domain.py @@ -22,15 +22,31 @@ import pandas as pd import pytz +from ziplime.errors import NoFurtherDataError from ziplime.utils.calendar_utils import get_calendar -from ziplime.country import CountryCode from ziplime.utils.formatting import bulleted_list from exchange_calendars.utils.pandas_utils import days_at_time class Domain: - """Domain interface.""" + """A domain represents a set of labels for the arrays computed by a Pipeline. + + A domain defines two things: + + 1. A calendar defining the dates to which the pipeline's inputs and outputs + should be aligned. The calendar is represented concretely by a pandas + DatetimeIndex. + + 2. The set of assets that the pipeline should compute over. Right now, the only + supported way of representing this set is with a two-character country code + describing the country of assets over which the pipeline should compute.
In + the future, we expect to expand this functionality to include more general + concepts. + """ + + def __init__(self): + self.assets = None def sessions(self): """Get all trading sessions for the calendar of this domain. @@ -92,24 +108,6 @@ def roll_forward(self, dt): ) from exc -Domain.__doc__ = """A domain represents a set of labels for the arrays computed by a Pipeline. - -A domain defines two things: - -1. A calendar defining the dates to which the pipeline's inputs and outputs - should be aligned. The calendar is represented concretely by a pandas - DatetimeIndex. - -2. The set of assets that the pipeline should compute over. Right now, the only - supported way of representing this set is with a two-character country code - describing the country of assets over which the pipeline should compute. In - the future, we expect to expand this functionality to include more general - concepts. -""" -Domain.__name__ = "Domain" -Domain.__qualname__ = "ziplime.pipeline.domain.Domain" - - class GenericDomain(Domain): """Special singleton class used to represent generic DataSets and Columns.""" @@ -150,7 +148,7 @@ class EquityCalendarDomain(Domain): """ def __init__( - self, country_code: str, calendar_name:str, data_query_offset=-np.timedelta64(45, "m") + self, country_code: str, calendar_name: str, data_query_offset=-np.timedelta64(45, "m") ): self._country_code = country_code self.calendar_name = calendar_name @@ -169,7 +167,7 @@ def __init__( def country_code(self): return self._country_code - #@lazyval + # @lazyval @property def calendar(self): return get_calendar(self.calendar_name) @@ -196,99 +194,6 @@ def __repr__(self): ) -AR_EQUITIES = EquityCalendarDomain(CountryCode.ARGENTINA, "XBUE") -AT_EQUITIES = EquityCalendarDomain(CountryCode.AUSTRIA, "XWBO") -AU_EQUITIES = EquityCalendarDomain(CountryCode.AUSTRALIA, "XASX") -BE_EQUITIES = EquityCalendarDomain(CountryCode.BELGIUM, "XBRU") -BR_EQUITIES = EquityCalendarDomain(CountryCode.BRAZIL, "BVMF") -CA_EQUITIES = 
EquityCalendarDomain(CountryCode.CANADA, "XTSE") -CH_EQUITIES = EquityCalendarDomain(CountryCode.SWITZERLAND, "XSWX") -CL_EQUITIES = EquityCalendarDomain(CountryCode.CHILE, "XSGO") -CN_EQUITIES = EquityCalendarDomain(CountryCode.CHINA, "XSHG") -CO_EQUITIES = EquityCalendarDomain(CountryCode.COLOMBIA, "XBOG") -CZ_EQUITIES = EquityCalendarDomain(CountryCode.CZECH_REPUBLIC, "XPRA") -DE_EQUITIES = EquityCalendarDomain(CountryCode.GERMANY, "XFRA") -DK_EQUITIES = EquityCalendarDomain(CountryCode.DENMARK, "XCSE") -ES_EQUITIES = EquityCalendarDomain(CountryCode.SPAIN, "XMAD") -FI_EQUITIES = EquityCalendarDomain(CountryCode.FINLAND, "XHEL") -FR_EQUITIES = EquityCalendarDomain(CountryCode.FRANCE, "XPAR") -GB_EQUITIES = EquityCalendarDomain(CountryCode.UNITED_KINGDOM, "XLON") -GR_EQUITIES = EquityCalendarDomain(CountryCode.GREECE, "ASEX") -HK_EQUITIES = EquityCalendarDomain(CountryCode.HONG_KONG, "XHKG") -HU_EQUITIES = EquityCalendarDomain(CountryCode.HUNGARY, "XBUD") -ID_EQUITIES = EquityCalendarDomain(CountryCode.INDONESIA, "XIDX") -IE_EQUITIES = EquityCalendarDomain(CountryCode.IRELAND, "XDUB") -IN_EQUITIES = EquityCalendarDomain(CountryCode.INDIA, "XBOM") -IT_EQUITIES = EquityCalendarDomain(CountryCode.ITALY, "XMIL") -JP_EQUITIES = EquityCalendarDomain(CountryCode.JAPAN, "XTKS") -KR_EQUITIES = EquityCalendarDomain(CountryCode.SOUTH_KOREA, "XKRX") -MX_EQUITIES = EquityCalendarDomain(CountryCode.MEXICO, "XMEX") -MY_EQUITIES = EquityCalendarDomain(CountryCode.MALAYSIA, "XKLS") -NL_EQUITIES = EquityCalendarDomain(CountryCode.NETHERLANDS, "XAMS") -NO_EQUITIES = EquityCalendarDomain(CountryCode.NORWAY, "XOSL") -NZ_EQUITIES = EquityCalendarDomain(CountryCode.NEW_ZEALAND, "XNZE") -PE_EQUITIES = EquityCalendarDomain(CountryCode.PERU, "XLIM") -PH_EQUITIES = EquityCalendarDomain(CountryCode.PHILIPPINES, "XPHS") -PK_EQUITIES = EquityCalendarDomain(CountryCode.PAKISTAN, "XKAR") -PL_EQUITIES = EquityCalendarDomain(CountryCode.POLAND, "XWAR") -PT_EQUITIES = 
EquityCalendarDomain(CountryCode.PORTUGAL, "XLIS") -RU_EQUITIES = EquityCalendarDomain(CountryCode.RUSSIA, "XMOS") -SE_EQUITIES = EquityCalendarDomain(CountryCode.SWEDEN, "XSTO") -SG_EQUITIES = EquityCalendarDomain(CountryCode.SINGAPORE, "XSES") -TH_EQUITIES = EquityCalendarDomain(CountryCode.THAILAND, "XBKK") -TR_EQUITIES = EquityCalendarDomain(CountryCode.TURKEY, "XIST") -TW_EQUITIES = EquityCalendarDomain(CountryCode.TAIWAN, "XTAI") -US_EQUITIES = EquityCalendarDomain(CountryCode.UNITED_STATES, "XNYS") -ZA_EQUITIES = EquityCalendarDomain(CountryCode.SOUTH_AFRICA, "XJSE") - -BUILT_IN_DOMAINS = [ - AR_EQUITIES, - AT_EQUITIES, - AU_EQUITIES, - BE_EQUITIES, - BR_EQUITIES, - CA_EQUITIES, - CH_EQUITIES, - CL_EQUITIES, - CN_EQUITIES, - CO_EQUITIES, - CZ_EQUITIES, - DE_EQUITIES, - DK_EQUITIES, - ES_EQUITIES, - FI_EQUITIES, - FR_EQUITIES, - GB_EQUITIES, - GR_EQUITIES, - HK_EQUITIES, - HU_EQUITIES, - ID_EQUITIES, - IE_EQUITIES, - IN_EQUITIES, - IT_EQUITIES, - JP_EQUITIES, - KR_EQUITIES, - MX_EQUITIES, - MY_EQUITIES, - NL_EQUITIES, - NO_EQUITIES, - NZ_EQUITIES, - PE_EQUITIES, - PH_EQUITIES, - PK_EQUITIES, - PL_EQUITIES, - PT_EQUITIES, - RU_EQUITIES, - SE_EQUITIES, - SG_EQUITIES, - TH_EQUITIES, - TR_EQUITIES, - TW_EQUITIES, - US_EQUITIES, - ZA_EQUITIES, -] - - def infer_domain(terms): """Infer the domain from a collection of terms. diff --git a/ziplime/pipeline/engine.py b/ziplime/pipeline/engine.py index bd958de139..7b46ffb00b 100644 --- a/ziplime/pipeline/engine.py +++ b/ziplime/pipeline/engine.py @@ -55,30 +55,38 @@ into "narrow" format, with output labels dictated by the Pipeline's screen. This logic lives in SimplePipelineEngine._to_narrow. 
""" - +import datetime from abc import ABC, abstractmethod from functools import partial +from typing import Callable +import numpy import pandas as pd from numpy import arange, array +from pandas import DatetimeIndex from toolz import groupby from ziplime.errors import NoFurtherDataError -from ziplime.lib.adjusted_array import ensure_adjusted_array, ensure_ndarray +from ziplime.lib.adjusted_array import ensure_adjusted_array, ensure_ndarray, AdjustedArray from ziplime.utils.date_utils import compute_date_range_chunks from ziplime.utils.numpy_utils import as_column, repeat_first_axis, repeat_last_axis from ziplime.utils.pandas_utils import categorical_df_concat, explode from ziplime.utils.string_formatting import bulleted_list - -from .domain import GENERIC, Domain -from .graph import maybe_specialize -from .hooks import DelegatingHooks -from .term import AssetExists, InputDates, LoadableTerm +from ziplime.pipeline.pipeline import Pipeline +from ziplime.pipeline.terms.term import Term +from ziplime.pipeline.terms.loadable_term import LoadableTerm +from .domain import Domain, GENERIC +from .graph import maybe_specialize, ExecutionPlan +from .hooks import DelegatingHooks, PipelineHooks +from .terms.asset_exists import AssetExists +from .terms.input_dates import InputDates +from ..assets.services.asset_service import AssetService class PipelineEngine(ABC): @abstractmethod - def run_pipeline(self, pipeline, start_date, end_date, hooks=None): + def run_pipeline(self, pipeline: Pipeline, start_date: datetime.date, end_date: datetime.date, + hooks: list[PipelineHooks] = None): """Compute values for ``pipeline`` from ``start_date`` to ``end_date``. 
Parameters @@ -110,7 +118,8 @@ def run_pipeline(self, pipeline, start_date, end_date, hooks=None): @abstractmethod def run_chunked_pipeline( - self, pipeline, start_date, end_date, chunksize, hooks=None + self, pipeline: Pipeline, start_date: datetime.date, end_date: datetime.date, chunksize: int, + hooks: list[PipelineHooks] = None ): """Compute values for ``pipeline`` from ``start_date`` to ``end_date``, in date chunks of size ``chunksize``. @@ -161,13 +170,15 @@ class NoEngineRegistered(Exception): class ExplodingPipelineEngine(PipelineEngine): """A PipelineEngine that doesn't do anything.""" - def run_pipeline(self, pipeline, start_date, end_date, hooks=None): + def run_pipeline(self, pipeline: Pipeline, start_date: datetime.date, end_date: datetime.date, + hooks: list[PipelineHooks] = None): raise NoEngineRegistered( "Attempted to run a pipeline but no pipeline " "resources were registered." ) def run_chunked_pipeline( - self, pipeline, start_date, end_date, chunksize, hooks=None + self, pipeline: Pipeline, start_date: datetime.date, end_date: datetime.date, chunksize: int, + hooks: list[PipelineHooks] = None ): raise NoEngineRegistered( "Attempted to run a chunked pipeline but no pipeline " @@ -176,8 +187,12 @@ def run_chunked_pipeline( def default_populate_initial_workspace( - initial_workspace, root_mask_term, execution_plan, dates, assets -): + initial_workspace: dict[Term, AdjustedArray], + root_mask_term: Term, + execution_plan: ExecutionPlan, + dates: pd.DatetimeIndex, + assets: pd.Index +) -> dict[Term, AdjustedArray]: """The default implementation for ``populate_initial_workspace``. This function returns the ``initial_workspace`` argument without making any modifications. @@ -214,7 +229,7 @@ class SimplePipelineEngine(PipelineEngine): get_loader : callable A function that is given a loadable term and returns a PipelineLoader to use to retrieve raw data for that term. 
- asset_finder : ziplime.assets.AssetFinder + asset_service : ziplime.assets.AssetFinder An AssetFinder instance. We depend on the AssetFinder to determine which assets are in the top-level universe at any point in time. populate_initial_workspace : callable, optional @@ -233,28 +248,27 @@ class SimplePipelineEngine(PipelineEngine): __slots__ = ( "_get_loader", - "_finder", "_root_mask_term", "_root_mask_dates_term", "_populate_initial_workspace", ) def __init__( - self, - get_loader, - asset_finder, - default_domain=GENERIC, - populate_initial_workspace=None, - default_hooks=None, + self, + get_loader: Callable, + asset_service: AssetService, + default_domain: Domain=GENERIC, + populate_initial_workspace: Callable=None, + default_hooks=None, ): self._get_loader = get_loader - self._finder = asset_finder + self._asset_service = asset_service self._root_mask_term = AssetExists() self._root_mask_dates_term = InputDates() self._populate_initial_workspace = ( - populate_initial_workspace or default_populate_initial_workspace + populate_initial_workspace or default_populate_initial_workspace ) self._default_domain = default_domain @@ -263,8 +277,9 @@ def __init__( else: self._default_hooks = list(default_hooks) - def run_chunked_pipeline( - self, pipeline, start_date, end_date, chunksize, hooks=None + async def run_chunked_pipeline( + self, pipeline: Pipeline, start_date: datetime.datetime, end_date: datetime.datetime, chunksize: int, + hooks: list[PipelineHooks] = None ): """Compute values for ``pipeline`` from ``start_date`` to ``end_date``, in date chunks of size ``chunksize``. @@ -276,9 +291,9 @@ def run_chunked_pipeline( ---------- pipeline : Pipeline The pipeline to run. - start_date : pd.Timestamp + start_date : datetime.datetime The start date to run the pipeline for. - end_date : pd.Timestamp + end_date : datetime.datetime The end date to run the pipeline for. chunksize : int The number of days to execute at a time. 
@@ -305,16 +320,16 @@ def run_chunked_pipeline( """ domain = self.resolve_domain(pipeline) ranges = compute_date_range_chunks( - domain.sessions(), - start_date, - end_date, - chunksize, + sessions=domain.sessions(), + start_date=start_date, + end_date=end_date, + chunksize=chunksize, ) - hooks = self._resolve_hooks(hooks) + hooks = self._resolve_hooks(hooks=hooks) run_pipeline = partial(self._run_pipeline_impl, pipeline, hooks=hooks) with hooks.running_pipeline(pipeline, start_date, end_date): - chunks = [run_pipeline(s, e) for s, e in ranges] + chunks = [await run_pipeline(s, e) for s, e in ranges] if len(chunks) == 1: # OPTIMIZATION: Don't make an extra copy in `categorical_df_concat` @@ -324,18 +339,19 @@ def run_chunked_pipeline( # Filter out empty chunks. Empty dataframes lose dtype information, # which makes concatenation fail. nonempty_chunks = [c for c in chunks if len(c)] - return categorical_df_concat(nonempty_chunks, inplace=True) + return categorical_df_concat(df_list=nonempty_chunks, inplace=True) - def run_pipeline(self, pipeline, start_date, end_date, hooks=None): + async def run_pipeline(self, pipeline: Pipeline, start_date: datetime.datetime, end_date: datetime.datetime, + hooks: list[PipelineHooks] | None = None): """Compute values for ``pipeline`` from ``start_date`` to ``end_date``. Parameters ---------- pipeline : ziplime.pipeline.Pipeline The pipeline to run. - start_date : pd.Timestamp + start_date : datetime.datetime Start date of the computed matrix. - end_date : pd.Timestamp + end_date : datetime.datetime End date of the computed matrix. hooks : list[implements(PipelineHooks)], optional Hooks for instrumenting Pipeline execution. @@ -354,16 +370,19 @@ def run_pipeline(self, pipeline, start_date, end_date, hooks=None): A screen of ``None`` indicates that a row should be returned for each asset that existed each day. 
""" - hooks = self._resolve_hooks(hooks) - with hooks.running_pipeline(pipeline, start_date, end_date): - return self._run_pipeline_impl( - pipeline, - start_date, - end_date, - hooks, + hooks = self._resolve_hooks(hooks=hooks) + with hooks.running_pipeline(pipeline=pipeline, start_date=start_date, end_date=end_date): + return await self._run_pipeline_impl( + pipeline=pipeline, + start_date=start_date, + end_date=end_date, + hooks=hooks, ) - def _run_pipeline_impl(self, pipeline, start_date, end_date, hooks): + async def _run_pipeline_impl( + self, pipeline: Pipeline, start_date: datetime.date, end_date: datetime.date, + hooks: list[PipelineHooks] + ): """Shared core for ``run_pipeline`` and ``run_chunked_pipeline``.""" # See notes at the top of this module for a description of the # algorithm implemented here. @@ -373,39 +392,39 @@ def _run_pipeline_impl(self, pipeline, start_date, end_date, hooks): f"start_date={start_date}, end_date={end_date}" ) - domain = self.resolve_domain(pipeline) + domain = self.resolve_domain(pipeline=pipeline) plan = pipeline.to_execution_plan( - domain, - self._root_mask_term, - start_date, - end_date, + domain=domain, + default_screen=self._root_mask_term, + start_date=start_date, + end_date=end_date, ) extra_rows = plan.extra_rows[self._root_mask_term] - root_mask = self._compute_root_mask( - domain, - start_date, - end_date, - extra_rows, + root_mask = await self._compute_root_mask( + domain=domain, + start_date=start_date, + end_date=end_date, + extra_rows=extra_rows, ) - dates, sids, root_mask_values = explode(root_mask) + dates, sids, root_mask_values = explode(df=root_mask) workspace = self._populate_initial_workspace( - { + initial_workspace={ self._root_mask_term: root_mask_values, self._root_mask_dates_term: as_column(dates.values), }, - self._root_mask_term, - plan, - dates, - sids, + root_mask_term=self._root_mask_term, + execution_plan=plan, + dates=dates, + assets=sids, ) - refcounts = plan.initial_refcounts(workspace) - 
execution_order = plan.execution_order(workspace, refcounts) + refcounts = plan.initial_refcounts(initial_terms=workspace) + execution_order = plan.execution_order(workspace=workspace, refcounts=refcounts) - with hooks.computing_chunk(execution_order, start_date, end_date): - results = self.compute_chunk( + with hooks.computing_chunk(terms=execution_order, start_date=start_date, end_date=end_date): + results = await self.compute_chunk( graph=plan, dates=dates, sids=sids, @@ -415,15 +434,15 @@ def _run_pipeline_impl(self, pipeline, start_date, end_date, hooks): hooks=hooks, ) - return self._to_narrow( - plan.outputs, - results, - results.pop(plan.screen_name), - dates[extra_rows:], - sids, + return await self._to_narrow( + terms=plan.outputs, + data=results, + mask=results.pop(plan.screen_name), + dates=dates[extra_rows:], + assets=sids, ) - def _compute_root_mask(self, domain, start_date, end_date, extra_rows): + async def _compute_root_mask(self, domain: Domain, start_date: datetime.date, end_date: datetime.date, extra_rows: int): """Compute a lifetimes matrix from our AssetFinder, then drop columns that didn't exist at all during the query dates. @@ -451,19 +470,19 @@ def _compute_root_mask(self, domain, start_date, end_date, extra_rows): """ sessions = domain.sessions() - if start_date not in sessions: + if pd.to_datetime(start_date) not in sessions: raise ValueError( f"Pipeline start date ({start_date}) is not a trading session for " f"domain {domain}." ) - elif end_date not in sessions: + elif pd.to_datetime(end_date) not in sessions: raise ValueError( f"Pipeline end date {end_date} is not a trading session for " f"domain {domain}." 
) - start_idx, end_idx = sessions.slice_locs(start_date, end_date) + start_idx, end_idx = sessions.slice_locs(start=start_date, end=end_date) if start_idx < extra_rows: raise NoFurtherDataError.from_lookback_window( initial_message="Insufficient data to compute Pipeline:", @@ -477,12 +496,18 @@ def _compute_root_mask(self, domain, start_date, end_date, extra_rows): # # Build lifetimes matrix reaching back to `extra_rows` days before # `start_date.` - finder = self._finder - lifetimes = finder.lifetimes( - sessions[start_idx - extra_rows : end_idx], - include_start_date=False, - country_codes=(domain.country_code,), - ) + if domain.assets is not None: + lifetimes = await self._asset_service.asset_lifetimes( + dates=sessions[start_idx - extra_rows: end_idx], + include_start_date=False, + assets=domain.assets, + ) + else: + lifetimes = await self._asset_service.lifetimes( + dates=sessions[start_idx - extra_rows: end_idx], + include_start_date=False, + country_codes=[domain.country_code, ], + ) if not lifetimes.columns.unique: columns = lifetimes.columns @@ -510,7 +535,7 @@ def _compute_root_mask(self, domain, start_date, end_date, extra_rows): return ret @staticmethod - def _inputs_for_term(term, workspace, graph, domain, refcounts): + def _inputs_for_term(term: Term, workspace: dict, graph: ExecutionPlan, domain: Domain, refcounts: dict[str, int]): """ Compute inputs for the given term. @@ -524,7 +549,7 @@ def _inputs_for_term(term, workspace, graph, domain, refcounts): # We need to specialize here because we don't change ComputableTerm # after resolving domains, so they can still contain generic terms as # inputs. 
- specialized = [maybe_specialize(t, domain) for t in term.inputs] + specialized = [maybe_specialize(term=t, domain=domain) for t in term.inputs] if term.windowed: # If term is windowed, then all input data should be instances of @@ -558,8 +583,10 @@ def _inputs_for_term(term, workspace, graph, domain, refcounts): out.append(input_data) return out - def compute_chunk( - self, graph, dates, sids, workspace, refcounts, execution_order, hooks + async def compute_chunk( + self, graph: ExecutionPlan, dates: DatetimeIndex, sids: pd.Index, workspace: dict[Term, AdjustedArray], + refcounts: dict[Term, int], execution_order: list[Term], + hooks: PipelineHooks ): """Compute the Pipeline terms in the graph for the requested start and end dates. @@ -593,7 +620,7 @@ def compute_chunk( results : dict Dictionary mapping requested results to outputs. """ - self._validate_compute_chunk_params(graph, dates, sids, workspace) + self._validate_compute_chunk_params(graph=graph, dates=dates, sids=sids, initial_workspace=workspace) get_loader = self._get_loader @@ -641,10 +668,10 @@ def loader_group_key(term): # Asset labels are always the same, but date labels vary by how # many extra rows are needed. 
mask, mask_dates = graph.mask_and_dates_for_term( - term, - self._root_mask_term, - workspace, - dates, + term=term, + root_mask_term=self._root_mask_term, + workspace=workspace, + all_dates=dates, ) if isinstance(term, LoadableTerm): @@ -652,38 +679,38 @@ def loader_group_key(term): to_load = sorted( loader_groups[loader_group_key(term)], key=lambda t: t.dataset ) - self._ensure_can_load(loader, to_load) + self._ensure_can_load(loader=loader, terms=to_load) with hooks.loading_terms(to_load): - loaded = loader.load_adjusted_array( - domain, - to_load, - mask_dates, - sids, - mask, + loaded = await loader.load_adjusted_array( + domain=domain, + columns=to_load, + dates=mask_dates, + sids=sids, + mask=mask, ) assert set(loaded) == set(to_load), ( - "loader did not return an AdjustedArray for each column\n" - "expected: %r\n" - "got: %r" - % ( - sorted(to_load, key=repr), - sorted(loaded, key=repr), - ) + "loader did not return an AdjustedArray for each column\n" + "expected: %r\n" + "got: %r" + % ( + sorted(to_load, key=repr), + sorted(loaded, key=repr), + ) ) workspace.update(loaded) else: - with hooks.computing_term(term): + with hooks.computing_term(term=term): workspace[term] = term._compute( self._inputs_for_term( - term, - workspace, - graph, - domain, - refcounts, + term=term, + workspace=workspace, + graph=graph, + domain=domain, + refcounts=refcounts, ), - mask_dates, - sids, - mask, + dates=mask_dates, + assets=sids, + mask=mask, ) if term.ndim == 2: assert workspace[term].shape == mask.shape @@ -692,7 +719,7 @@ def loader_group_key(term): # Decref dependencies of ``term``, and clear any terms # whose refcounts hit 0. - for garbage in graph.decref_dependencies(term, refcounts): + for garbage in graph.decref_dependencies(term=term, refcounts=refcounts): del workspace[garbage] # At this point, all the output terms are in the workspace. 
@@ -700,10 +727,10 @@ def loader_group_key(term): graph_extra_rows = graph.extra_rows for name, term in graph.outputs.items(): # Truncate off extra rows from outputs. - out[name] = workspace[term][graph_extra_rows[term] :] + out[name] = workspace[term][graph_extra_rows[term]:] return out - def _to_narrow(self, terms, data, mask, dates, assets): + async def _to_narrow(self, terms: dict[str, Term], data: dict[str, AdjustedArray], mask, dates: numpy.ndarray, assets: numpy.ndarray): """ Convert raw computed pipeline results into a DataFrame for public APIs. @@ -757,13 +784,13 @@ def _to_narrow(self, terms, data, mask, dates, assets): # Using this to convert np.records to tuples final_columns[name] = terms[name].postprocess(data[name][mask]) - resolved_assets = array(self._finder.retrieve_all(assets)) + resolved_assets = array(await self._asset_service.get_assets_by_sids(sids=list(assets))) index = _pipeline_output_index(dates, resolved_assets, mask) return pd.DataFrame( data=final_columns, index=index, columns=final_columns.keys() ) - def _validate_compute_chunk_params(self, graph, dates, sids, initial_workspace): + def _validate_compute_chunk_params(self, graph: ExecutionPlan, dates: pd.DatetimeIndex, sids: pd.Index, initial_workspace: dict[Term, AdjustedArray]): """ Verify that the values passed to compute_chunk are well-formed. 
""" @@ -841,7 +868,7 @@ def _validate_compute_chunk_params(self, graph, dates, sids, initial_workspace): ) ) - def resolve_domain(self, pipeline): + def resolve_domain(self, pipeline: Pipeline): """Resolve a concrete domain for ``pipeline``.""" domain = pipeline.domain(default=self._default_domain) if domain is GENERIC: @@ -852,10 +879,10 @@ def resolve_domain(self, pipeline): ) return domain - def _is_special_root_term(self, term): + def _is_special_root_term(self, term: Term): return term is self._root_mask_term or term is self._root_mask_dates_term - def _resolve_hooks(self, hooks): + def _resolve_hooks(self, hooks: list[PipelineHooks]): if hooks is None: hooks = [] return DelegatingHooks(self._default_hooks + hooks) diff --git a/ziplime/data/data_sources/grpc/__init__.py b/ziplime/pipeline/errors/__init__.py similarity index 100% rename from ziplime/data/data_sources/grpc/__init__.py rename to ziplime/pipeline/errors/__init__.py diff --git a/ziplime/pipeline/errors/bad_binary_operator.py b/ziplime/pipeline/errors/bad_binary_operator.py new file mode 100644 index 0000000000..bc5aa9ae69 --- /dev/null +++ b/ziplime/pipeline/errors/bad_binary_operator.py @@ -0,0 +1,22 @@ +class BadBinaryOperator(TypeError): + """ + Called when a bad binary operation is encountered. + + Parameters + ---------- + op : str + The attempted operation + left : ziplime.computable.Term + The left hand side of the operation. + right : ziplime.computable.Term + The right hand side of the operation. 
+ """ + + def __init__(self, op, left, right): + super(BadBinaryOperator, self).__init__( + "Can't compute {left} {op} {right}".format( + op=op, + left=type(left).__name__, + right=type(right).__name__, + ) + ) diff --git a/ziplime/pipeline/errors/dataset_family_lookup_error.py b/ziplime/pipeline/errors/dataset_family_lookup_error.py new file mode 100644 index 0000000000..d7a6d78320 --- /dev/null +++ b/ziplime/pipeline/errors/dataset_family_lookup_error.py @@ -0,0 +1,33 @@ +from textwrap import dedent + + +class DataSetFamilyLookupError(AttributeError): + """Exception thrown when a column is accessed on a DataSetFamily + instead of on the result of a slice. + + Parameters + ---------- + family_name : str + The name of the DataSetFamily on which the access occurred. + column_name : str + The name of the column accessed. + """ + + def __init__(self, family_name, column_name): + self.family_name = family_name + self.column_name = column_name + + def __str__(self): + # NOTE: when ``aggregate`` is added, remember to update this message + return dedent( + """\ + Attempted to access column {c} from DataSetFamily {d}: + + To work with dataset families, you must first select a + slice using the ``slice`` method: + + {d}.slice(...).{c} + """.format( + c=self.column_name, d=self.family_name + ) + ) diff --git a/ziplime/pipeline/errors/invalid_classifier_comparison.py b/ziplime/pipeline/errors/invalid_classifier_comparison.py new file mode 100644 index 0000000000..5706c23113 --- /dev/null +++ b/ziplime/pipeline/errors/invalid_classifier_comparison.py @@ -0,0 +1,10 @@ +class InvalidClassifierComparison(TypeError): + def __init__(self, classifier, compval): + super(InvalidClassifierComparison, self).__init__( + "Can't compare classifier of dtype" + " {dtype} to value {value} of type {type}.".format( + dtype=classifier.dtype, + value=compval, + type=type(compval).__name__, + ) + ) diff --git a/ziplime/pipeline/expression.py b/ziplime/pipeline/expression.py index 
251d58b260..5838bc045f 100644 --- a/ziplime/pipeline/expression.py +++ b/ziplime/pipeline/expression.py @@ -12,7 +12,11 @@ full, inf, ) -from ziplime.pipeline.term import Term, ComputableTerm + +from ziplime.pipeline.errors.bad_binary_operator import BadBinaryOperator +from ziplime.pipeline.terms.term import Term +from ziplime.pipeline.terms.computable_term import ComputableTerm + from ziplime.utils.numpy_utils import bool_dtype _VARIABLE_NAME_RE = re.compile("^(x_)([0-9]+)$") @@ -104,28 +108,7 @@ def _ensure_element(tup, elem): return tuple(chain(tup, (elem,))), len(tup) -class BadBinaryOperator(TypeError): - """ - Called when a bad binary operation is encountered. - Parameters - ---------- - op : str - The attempted operation - left : ziplime.computable.Term - The left hand side of the operation. - right : ziplime.computable.Term - The right hand side of the operation. - """ - - def __init__(self, op, left, right): - super(BadBinaryOperator, self).__init__( - "Can't compute {left} {op} {right}".format( - op=op, - left=type(left).__name__, - right=type(right).__name__, - ) - ) def method_name_for_op(op, commute=False): diff --git a/ziplime/pipeline/factors/__init__.py b/ziplime/pipeline/factors/__init__.py deleted file mode 100644 index 55296d0e38..0000000000 --- a/ziplime/pipeline/factors/__init__.py +++ /dev/null @@ -1,85 +0,0 @@ -from .factor import ( - CustomFactor, - Factor, - Latest, - RecarrayField, -) -from .basic import ( - AnnualizedVolatility, - AverageDollarVolume, - DailyReturns, - EWMA, - ExponentialWeightedMovingAverage, - ExponentialWeightedMovingStdDev, - EWMSTD, - LinearWeightedMovingAverage, - MaxDrawdown, - PeerCount, - PercentChange, - Returns, - SimpleMovingAverage, - VWAP, - WeightedAverageValue, -) -from .events import ( - BusinessDaysSincePreviousEvent, - BusinessDaysUntilNextEvent, -) -from .statistical import ( - RollingPearson, - RollingSpearman, - RollingLinearRegressionOfReturns, - RollingPearsonOfReturns, - RollingSpearmanOfReturns, 
- SimpleBeta, -) -from .technical import ( - Aroon, - BollingerBands, - FastStochasticOscillator, - IchimokuKinkoHyo, - MACDSignal, - MovingAverageConvergenceDivergenceSignal, - RateOfChangePercentage, - RSI, - TrueRange, -) - -__all__ = [ - "AnnualizedVolatility", - "Aroon", - "AverageDollarVolume", - "BollingerBands", - "BusinessDaysSincePreviousEvent", - "BusinessDaysUntilNextEvent", - "CustomFactor", - "DailyReturns", - "EWMA", - "EWMSTD", - "ExponentialWeightedMovingAverage", - "ExponentialWeightedMovingStdDev", - "Factor", - "FastStochasticOscillator", - "IchimokuKinkoHyo", - "Latest", - "LinearWeightedMovingAverage", - "MACDSignal", - "MaxDrawdown", - "MovingAverageConvergenceDivergenceSignal", - "PeerCount", - "PercentChange", - "RSI", - "RateOfChangePercentage", - "RecarrayField", - "Returns", - "RollingLinearRegressionOfReturns", - "RollingPearson", - "RollingPearsonOfReturns", - "RollingSpearman", - "RollingSpearmanOfReturns", - "SimpleBeta", - "SimpleMovingAverage", - "TrueRange", - "VWAP", - "WeightedAverageValue", -] diff --git a/ziplime/pipeline/factors/basic.py b/ziplime/pipeline/factors/basic.py deleted file mode 100644 index ac4d9186fd..0000000000 --- a/ziplime/pipeline/factors/basic.py +++ /dev/null @@ -1,565 +0,0 @@ -"""Simple common factors. -""" - -from numbers import Number -from numpy import ( - arange, - average, - clip, - copyto, - exp, - fmax, - full, - isnan, - log, - # NINF, - sqrt, - sum as np_sum, - unique, - errstate as np_errstate, -) -import numpy as np - -from ziplime.pipeline.data import EquityPricing -from ziplime.utils.math_utils import ( - nanargmax, - nanmax, - nanmean, - nanstd, - nansum, -) -from ziplime.utils.numpy_utils import ( - float64_dtype, - # ignore_nanwarnings, -) - -from .factor import CustomFactor -from ..mixins import SingleInputMixin - - -class Returns(CustomFactor): - """ - Calculates the percent change in close price over the given window_length. 
- - **Default Inputs**: [EquityPricing.close] - """ - - inputs = [EquityPricing.close] - window_safe = True - - def _validate(self): - super(Returns, self)._validate() - if self.window_length < 2: - raise ValueError( - "'Returns' expected a window length of at least 2, but was " - "given {window_length}. For daily returns, use a window " - "length of 2.".format(window_length=self.window_length) - ) - - def compute(self, today, assets, out, close): - out[:] = (close[-1] - close[0]) / close[0] - - -class PercentChange(SingleInputMixin, CustomFactor): - """ - Calculates the percent change over the given window_length. - - **Default Inputs:** None - - **Default Window Length:** None - - Notes - ----- - Percent change is calculated as ``(new - old) / abs(old)``. - """ - - window_safe = True - - def _validate(self): - super(PercentChange, self)._validate() - if self.window_length < 2: - raise ValueError( - "'PercentChange' expected a window length" - "of at least 2, but was given {window_length}. " - "For daily percent change, use a window " - "length of 2.".format(window_length=self.window_length) - ) - - def compute(self, today, assets, out, values): - with np_errstate(divide="ignore", invalid="ignore"): - out[:] = (values[-1] - values[0]) / abs(values[0]) - - -class DailyReturns(Returns): - """ - Calculates daily percent change in close price. - - **Default Inputs**: [EquityPricing.close] - """ - - inputs = [EquityPricing.close] - window_safe = True - window_length = 2 - - -class SimpleMovingAverage(SingleInputMixin, CustomFactor): - """ - Average Value of an arbitrary column - - **Default Inputs**: None - - **Default Window Length**: None - """ - - # numpy's nan functions throw warnings when passed an array containing only - # nans, but they still returns the desired value (nan), so we ignore the - # warning. 
- # ctx = ignore_nanwarnings() - - def compute(self, today, assets, out, data): - out[:] = nanmean(data, axis=0) - - -class WeightedAverageValue(CustomFactor): - """ - Helper for VWAP-like computations. - - **Default Inputs:** None - - **Default Window Length:** None - """ - - def compute(self, today, assets, out, base, weight): - out[:] = nansum(base * weight, axis=0) / nansum(weight, axis=0) - - -class VWAP(WeightedAverageValue): - """ - Volume Weighted Average Price - - **Default Inputs:** [EquityPricing.close, EquityPricing.volume] - - **Default Window Length:** None - """ - - inputs = (EquityPricing.close, EquityPricing.volume) - - -class MaxDrawdown(SingleInputMixin, CustomFactor): - """ - Max Drawdown - - **Default Inputs:** None - - **Default Window Length:** None - """ - - # ctx = ignore_nanwarnings() - - def compute(self, today, assets, out, data): - drawdowns = fmax.accumulate(data, axis=0) - data - drawdowns[isnan(drawdowns)] = -np.inf - drawdown_ends = nanargmax(drawdowns, axis=0) - - # TODO: Accelerate this loop in Cython or Numba. - for i, end in enumerate(drawdown_ends): - peak = nanmax(data[: end + 1, i]) - out[i] = (peak - data[end, i]) / data[end, i] - - -class AverageDollarVolume(CustomFactor): - """ - Average Daily Dollar Volume - - **Default Inputs:** [EquityPricing.close, EquityPricing.volume] - - **Default Window Length:** None - """ - - inputs = [EquityPricing.close, EquityPricing.volume] - - def compute(self, today, assets, out, close, volume): - out[:] = nansum(close * volume, axis=0) / len(close) - - -def exponential_weights(length, decay_rate): - """ - Build a weight vector for an exponentially-weighted statistic. - - The resulting ndarray is of the form:: - - [decay_rate ** length, ..., decay_rate ** 2, decay_rate] - - Parameters - ---------- - length : int - The length of the desired weight vector. - decay_rate : float - The rate at which entries in the weight vector increase or decrease. 
- - Returns - ------- - weights : ndarray[float64] - """ - return full(length, decay_rate, float64_dtype) ** arange(length + 1, 1, -1) - - -class _ExponentialWeightedFactor(SingleInputMixin, CustomFactor): - """ - Base class for factors implementing exponential-weighted operations. - - **Default Inputs:** None - - **Default Window Length:** None - - Parameters - ---------- - inputs : length-1 list or tuple of BoundColumn - The expression over which to compute the average. - window_length : int > 0 - Length of the lookback window over which to compute the average. - decay_rate : float, 0 < decay_rate <= 1 - Weighting factor by which to discount past observations. - - When calculating historical averages, rows are multiplied by the - sequence:: - - decay_rate, decay_rate ** 2, decay_rate ** 3, ... - - Methods - ------- - weights - from_span - from_halflife - from_center_of_mass - """ - - params = ("decay_rate",) - - @classmethod - def from_span(cls, inputs, window_length, span: Number, **kwargs): - """ - Convenience constructor for passing `decay_rate` in terms of `span`. - - Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the - behavior equivalent to passing `span` to pandas.ewma. - - Examples - -------- - .. code-block:: python - - # Equivalent to: - # my_ewma = EWMA( - # inputs=[EquityPricing.close], - # window_length=30, - # decay_rate=(1 - (2.0 / (1 + 15.0))), - # ) - my_ewma = EWMA.from_span( - inputs=[EquityPricing.close], - window_length=30, - span=15, - ) - - Notes - ----- - This classmethod is provided by both - :class:`ExponentialWeightedMovingAverage` and - :class:`ExponentialWeightedMovingStdDev`. - """ - if span <= 1: - raise ValueError("`span` must be a positive number. %s was passed." 
% span) - - decay_rate = 1.0 - (2.0 / (1.0 + span)) - assert 0.0 < decay_rate <= 1.0 - - return cls( - inputs=inputs, window_length=window_length, decay_rate=decay_rate, **kwargs - ) - - @classmethod - def from_halflife(cls, inputs, window_length, halflife: Number, **kwargs): - """ - Convenience constructor for passing ``decay_rate`` in terms of half - life. - - Forwards ``decay_rate`` as ``exp(log(.5) / halflife)``. This provides - the behavior equivalent to passing `halflife` to pandas.ewma. - - Examples - -------- - .. code-block:: python - - # Equivalent to: - # my_ewma = EWMA( - # inputs=[EquityPricing.close], - # window_length=30, - # decay_rate=np.exp(np.log(0.5) / 15), - # ) - my_ewma = EWMA.from_halflife( - inputs=[EquityPricing.close], - window_length=30, - halflife=15, - ) - - Notes - ----- - This classmethod is provided by both - :class:`ExponentialWeightedMovingAverage` and - :class:`ExponentialWeightedMovingStdDev`. - """ - if halflife <= 0: - raise ValueError( - "`span` must be a positive number. %s was passed." % halflife - ) - decay_rate = exp(log(0.5) / halflife) - assert 0.0 < decay_rate <= 1.0 - - return cls( - inputs=inputs, window_length=window_length, decay_rate=decay_rate, **kwargs - ) - - @classmethod - def from_center_of_mass(cls, inputs, window_length, center_of_mass, **kwargs): - """ - Convenience constructor for passing `decay_rate` in terms of center of - mass. - - Forwards `decay_rate` as `1 - (1 / 1 + center_of_mass)`. This provides - behavior equivalent to passing `center_of_mass` to pandas.ewma. - - Examples - -------- - .. 
code-block:: python - - # Equivalent to: - # my_ewma = EWMA( - # inputs=[EquityPricing.close], - # window_length=30, - # decay_rate=(1 - (1 / 15.0)), - # ) - my_ewma = EWMA.from_center_of_mass( - inputs=[EquityPricing.close], - window_length=30, - center_of_mass=15, - ) - - Notes - ----- - This classmethod is provided by both - :class:`ExponentialWeightedMovingAverage` and - :class:`ExponentialWeightedMovingStdDev`. - """ - return cls( - inputs=inputs, - window_length=window_length, - decay_rate=(1.0 - (1.0 / (1.0 + center_of_mass))), - **kwargs, - ) - - -class ExponentialWeightedMovingAverage(_ExponentialWeightedFactor): - """ - Exponentially Weighted Moving Average - - **Default Inputs:** None - - **Default Window Length:** None - - Parameters - ---------- - inputs : length-1 list/tuple of BoundColumn - The expression over which to compute the average. - window_length : int > 0 - Length of the lookback window over which to compute the average. - decay_rate : float, 0 < decay_rate <= 1 - Weighting factor by which to discount past observations. - - When calculating historical averages, rows are multiplied by the - sequence:: - - decay_rate, decay_rate ** 2, decay_rate ** 3, ... - - Notes - ----- - - This class can also be imported under the name ``EWMA``. - - See Also - -------- - :meth:`pandas.DataFrame.ewm` - """ - - def compute(self, today, assets, out, data, decay_rate): - out[:] = average( - data, - axis=0, - weights=exponential_weights(len(data), decay_rate), - ) - - -class ExponentialWeightedMovingStdDev(_ExponentialWeightedFactor): - """ - Exponentially Weighted Moving Standard Deviation - - **Default Inputs:** None - - **Default Window Length:** None - - Parameters - ---------- - inputs : length-1 list/tuple of BoundColumn - The expression over which to compute the average. - window_length : int > 0 - Length of the lookback window over which to compute the average. 
- decay_rate : float, 0 < decay_rate <= 1 - Weighting factor by which to discount past observations. - - When calculating historical averages, rows are multiplied by the - sequence:: - - decay_rate, decay_rate ** 2, decay_rate ** 3, ... - - Notes - ----- - - This class can also be imported under the name ``EWMSTD``. - - See Also - -------- - :func:`pandas.DataFrame.ewm` - """ - - def compute(self, today, assets, out, data, decay_rate): - weights = exponential_weights(len(data), decay_rate) - - mean = average(data, axis=0, weights=weights) - variance = average((data - mean) ** 2, axis=0, weights=weights) - - squared_weight_sum = np_sum(weights) ** 2 - bias_correction = squared_weight_sum / (squared_weight_sum - np_sum(weights**2)) - out[:] = sqrt(variance * bias_correction) - - -class LinearWeightedMovingAverage(SingleInputMixin, CustomFactor): - """ - Weighted Average Value of an arbitrary column - - **Default Inputs**: None - - **Default Window Length**: None - """ - - # numpy's nan functions throw warnings when passed an array containing only - # nans, but they still returns the desired value (nan), so we ignore the - # warning. - # ctx = ignore_nanwarnings() - - def compute(self, today, assets, out, data): - ndays = data.shape[0] - - # Initialize weights array - weights = arange(1, ndays + 1, dtype=float64_dtype).reshape(ndays, 1) - - # Compute normalizer - normalizer = (ndays * (ndays + 1)) / 2 - - # Weight the data - weighted_data = data * weights - - # Compute weighted averages - out[:] = nansum(weighted_data, axis=0) / normalizer - - -class AnnualizedVolatility(CustomFactor): - """ - Volatility. The degree of variation of a series over time as measured by - the standard deviation of daily returns. - https://en.wikipedia.org/wiki/Volatility_(finance) - - **Default Inputs:** [Returns(window_length=2)] - - Parameters - ---------- - annualization_factor : float, optional - The number of time units per year. 
Defaults is 252, the number of NYSE - trading days in a normal year. - """ - - inputs = [Returns(window_length=2)] - params = {"annualization_factor": 252.0} - window_length = 252 - - def compute(self, today, assets, out, returns, annualization_factor): - out[:] = nanstd(returns, axis=0) * (annualization_factor**0.5) - - -class PeerCount(SingleInputMixin, CustomFactor): - """ - Peer Count of distinct categories in a given classifier. This factor - is returned by the classifier instance method peer_count() - - **Default Inputs:** None - - **Default Window Length:** 1 - """ - - window_length = 1 - - def _validate(self): - super(PeerCount, self)._validate() - if self.window_length != 1: - raise ValueError( - "'PeerCount' expected a window length of 1, but was given" - "{window_length}.".format(window_length=self.window_length) - ) - - def compute(self, today, assets, out, classifier_values): - # Convert classifier array to group label int array - group_labels, null_label = self.inputs[0]._to_integral(classifier_values[0]) - _, inverse, counts = unique( # Get counts, idx of unique groups - group_labels, - return_counts=True, - return_inverse=True, - ) - copyto(out, counts[inverse], where=(group_labels != null_label)) - - -# Convenience aliases -EWMA = ExponentialWeightedMovingAverage -EWMSTD = ExponentialWeightedMovingStdDev - - -class Clip(CustomFactor): - """ - Clip (limit) the values in a factor. - - Given an interval, values outside the interval are clipped to the interval - edges. For example, if an interval of ``[0, 1]`` is specified, values - smaller than 0 become 0, and values larger than 1 become 1. - - **Default Window Length:** 1 - - Parameters - ---------- - min_bound : float - The minimum value to use. - max_bound : float - The maximum value to use. - - Notes - ----- - To only clip values on one side, ``-np.inf` and ``np.inf`` may be passed. - For example, to only clip the maximum value but not clip a minimum value: - - .. 
code-block:: python - - Clip(inputs=[factor], min_bound=-np.inf, max_bound=user_provided_max) - - See Also - -------- - numpy.clip - """ - - window_length = 1 - params = ("min_bound", "max_bound") - - def compute(self, today, assets, out, values, min_bound, max_bound): - clip(values[-1], min_bound, max_bound, out=out) diff --git a/ziplime/pipeline/factors/statistical.py b/ziplime/pipeline/factors/statistical.py deleted file mode 100644 index e7fa090f94..0000000000 --- a/ziplime/pipeline/factors/statistical.py +++ /dev/null @@ -1,695 +0,0 @@ -from numexpr import evaluate -import numpy as np -from numpy import broadcast_arrays -from scipy.stats import ( - linregress, - spearmanr, -) - -from ziplime.errors import IncompatibleTerms -from ziplime.pipeline.factors import CustomFactor -from ziplime.pipeline.filters import SingleAsset -from ziplime.pipeline.mixins import StandardOutputs -from ziplime.pipeline.term import AssetExists -from ziplime.utils.math_utils import nanmean -from ziplime.utils.numpy_utils import ( - float64_dtype, -) - -from .basic import Returns -from ziplime.assets.entities.asset import Asset - - -class _RollingCorrelation(CustomFactor): - - def __new__(cls, base_factor: np.float64 | np.float32, target: np.float64 | np.float32, correlation_length, - mask=None): - if target.ndim == 2 and base_factor.mask is not target.mask: - raise IncompatibleTerms(term_1=base_factor, term_2=target) - if correlation_length < 2: - raise ValueError("correlation_length must be greater than or equal to 2") - return super(_RollingCorrelation, cls).__new__( - cls, - inputs=[base_factor, target], - window_length=correlation_length, - mask=mask, - ) - - -class RollingPearson(_RollingCorrelation): - """ - A Factor that computes pearson correlation coefficients between the columns - of a given Factor and either the columns of another Factor/BoundColumn or a - slice/single column of data. 
- - Parameters - ---------- - base_factor : ziplime.pipeline.Factor - The factor for which to compute correlations of each of its columns - with `target`. - target : ziplime.pipeline.Term with a numeric dtype - The term with which to compute correlations against each column of data - produced by `base_factor`. This term may be a Factor, a BoundColumn or - a Slice. If `target` is two-dimensional, correlations are computed - asset-wise. - correlation_length : int - Length of the lookback window over which to compute each correlation - coefficient. - mask : ziplime.pipeline.Filter, optional - A Filter describing which assets (columns) of `base_factor` should have - their correlation with `target` computed each day. - - See Also - -------- - :func:`scipy.stats.pearsonr` - :meth:`Factor.pearsonr` - :class:`ziplime.pipeline.factors.RollingPearsonOfReturns` - - Notes - ----- - Most users should call Factor.pearsonr rather than directly construct an - instance of this class. - """ - - window_safe = True - - def compute(self, today, assets, out, base_data, target_data): - vectorized_pearson_r( - base_data, - target_data, - allowed_missing=0, - out=out, - ) - - -class RollingSpearman(_RollingCorrelation): - """ - A Factor that computes spearman rank correlation coefficients between the - columns of a given Factor and either the columns of another - Factor/BoundColumn or a slice/single column of data. - - Parameters - ---------- - base_factor : ziplime.pipeline.Factor - The factor for which to compute correlations of each of its columns - with `target`. - target : ziplime.pipeline.Term with a numeric dtype - The term with which to compute correlations against each column of data - produced by `base_factor`. This term may be a Factor, a BoundColumn or - a Slice. If `target` is two-dimensional, correlations are computed - asset-wise. - correlation_length : int - Length of the lookback window over which to compute each correlation - coefficient. 
- mask : ziplime.pipeline.Filter, optional - A Filter describing which assets (columns) of `base_factor` should have - their correlation with `target` computed each day. - - See Also - -------- - :func:`scipy.stats.spearmanr` - :meth:`Factor.spearmanr` - :class:`ziplime.pipeline.factors.RollingSpearmanOfReturns` - - Notes - ----- - Most users should call Factor.spearmanr rather than directly construct an - instance of this class. - """ - - window_safe = True - - def compute(self, today, assets, out, base_data, target_data): - # If `target_data` is a Slice or single column of data, broadcast it - # out to the same shape as `base_data`, then compute column-wise. This - # is efficient because each column of the broadcasted array only refers - # to a single memory location. - target_data = broadcast_arrays(target_data, base_data)[0] - for i in range(len(out)): - out[i] = spearmanr(base_data[:, i], target_data[:, i])[0] - - -class RollingLinearRegression(CustomFactor): - """ - A Factor that performs an ordinary least-squares regression predicting the - columns of a given Factor from either the columns of another - Factor/BoundColumn or a slice/single column of data. - - Parameters - ---------- - dependent : ziplime.pipeline.Factor - The factor whose columns are the predicted/dependent variable of each - regression with `independent`. - independent : ziplime.pipeline.slice.Slice or ziplime.pipeline.Factor - The factor/slice whose columns are the predictor/independent variable - of each regression with `dependent`. If `independent` is a Factor, - regressions are computed asset-wise. - regression_length : int - Length of the lookback window over which to compute each regression. - mask : ziplime.pipeline.Filter, optional - A Filter describing which assets (columns) of `dependent` should be - regressed against `independent` each day. 
- - See Also - -------- - :func:`scipy.stats.linregress` - :meth:`Factor.linear_regression` - :class:`ziplime.pipeline.factors.RollingLinearRegressionOfReturns` - - Notes - ----- - Most users should call Factor.linear_regression rather than directly - construct an instance of this class. - """ - - outputs = ["alpha", "beta", "r_value", "p_value", "stderr"] - - def __new__(cls, dependent: np.float64 | np.int64, independent: np.float64 | np.int64, - regression_length, mask=None): - if independent.ndim == 2 and dependent.mask is not independent.mask: - raise IncompatibleTerms(term_1=dependent, term_2=independent) - if regression_length < 2: - raise ValueError("regression_length must be greater than or equal to 2") - return super(RollingLinearRegression, cls).__new__( - cls, - inputs=[dependent, independent], - window_length=regression_length, - mask=mask, - ) - - def compute(self, today, assets, out, dependent, independent): - alpha = out.alpha - beta = out.beta - r_value = out.r_value - p_value = out.p_value - stderr = out.stderr - - def regress(y, x): - regr_results = linregress(y=y, x=x) - # `linregress` returns its results in the following order: - # slope, intercept, r-value, p-value, stderr - alpha[i] = regr_results[1] - beta[i] = regr_results[0] - r_value[i] = regr_results[2] - p_value[i] = regr_results[3] - stderr[i] = regr_results[4] - - # If `independent` is a Slice or single column of data, broadcast it - # out to the same shape as `dependent`, then compute column-wise. This - # is efficient because each column of the broadcasted array only refers - # to a single memory location. - independent = broadcast_arrays(independent, dependent)[0] - for i in range(len(out)): - regress(y=dependent[:, i], x=independent[:, i]) - - -class RollingPearsonOfReturns(RollingPearson): - """ - Calculates the Pearson product-moment correlation coefficient of the - returns of the given asset with the returns of all other assets. 
- - Pearson correlation is what most people mean when they say "correlation - coefficient" or "R-value". - - Parameters - ---------- - target : ziplime.assets.Asset - The asset to correlate with all other assets. - returns_length : int >= 2 - Length of the lookback window over which to compute returns. Daily - returns require a window length of 2. - correlation_length : int >= 1 - Length of the lookback window over which to compute each correlation - coefficient. - mask : ziplime.pipeline.Filter, optional - A Filter describing which assets should have their correlation with the - target asset computed each day. - - Notes - ----- - Computing this factor over many assets can be time consuming. It is - recommended that a mask be used in order to limit the number of assets over - which correlations are computed. - - Examples - -------- - Let the following be example 10-day returns for three different assets:: - - SPY MSFT FB - 2017-03-13 -.03 .03 .04 - 2017-03-14 -.02 -.03 .02 - 2017-03-15 -.01 .02 .01 - 2017-03-16 0 -.02 .01 - 2017-03-17 .01 .04 -.01 - 2017-03-20 .02 -.03 -.02 - 2017-03-21 .03 .01 -.02 - 2017-03-22 .04 -.02 -.02 - - Suppose we are interested in SPY's rolling returns correlation with each - stock from 2017-03-17 to 2017-03-22, using a 5-day look back window (that - is, we calculate each correlation coefficient over 5 days of data). We can - achieve this by doing:: - - rolling_correlations = RollingPearsonOfReturns( - target=sid(8554), - returns_length=10, - correlation_length=5, - ) - - The result of computing ``rolling_correlations`` from 2017-03-17 to - 2017-03-22 gives:: - - SPY MSFT FB - 2017-03-17 1 .15 -.96 - 2017-03-20 1 .10 -.96 - 2017-03-21 1 -.16 -.94 - 2017-03-22 1 -.16 -.85 - - Note that the column for SPY is all 1's, as the correlation of any data - series with itself is always 1. To understand how each of the other values - were calculated, take for example the .15 in MSFT's column. 
This is the - correlation coefficient between SPY's returns looking back from 2017-03-17 - (-.03, -.02, -.01, 0, .01) and MSFT's returns (.03, -.03, .02, -.02, .04). - - See Also - -------- - :class:`ziplime.pipeline.factors.RollingSpearmanOfReturns` - :class:`ziplime.pipeline.factors.RollingLinearRegressionOfReturns` - """ - - def __new__(cls, target, returns_length, correlation_length, mask=None): - # Use the `SingleAsset` filter here because it protects against - # inputting a non-existent target asset. - returns = Returns( - window_length=returns_length, - mask=(AssetExists() | SingleAsset(asset=target)), - ) - return super(RollingPearsonOfReturns, cls).__new__( - cls, - base_factor=returns, - target=returns[target], - correlation_length=correlation_length, - mask=mask, - ) - - -class RollingSpearmanOfReturns(RollingSpearman): - """ - Calculates the Spearman rank correlation coefficient of the returns of the - given asset with the returns of all other assets. - - Parameters - ---------- - target : ziplime.assets.Asset - The asset to correlate with all other assets. - returns_length : int >= 2 - Length of the lookback window over which to compute returns. Daily - returns require a window length of 2. - correlation_length : int >= 1 - Length of the lookback window over which to compute each correlation - coefficient. - mask : ziplime.pipeline.Filter, optional - A Filter describing which assets should have their correlation with the - target asset computed each day. - - Notes - ----- - Computing this factor over many assets can be time consuming. It is - recommended that a mask be used in order to limit the number of assets over - which correlations are computed. 
- - See Also - -------- - :class:`ziplime.pipeline.factors.RollingPearsonOfReturns` - :class:`ziplime.pipeline.factors.RollingLinearRegressionOfReturns` - """ - - def __new__(cls, target, returns_length, correlation_length, mask=None): - # Use the `SingleAsset` filter here because it protects against - # inputting a non-existent target asset. - returns = Returns( - window_length=returns_length, - mask=(AssetExists() | SingleAsset(asset=target)), - ) - return super(RollingSpearmanOfReturns, cls).__new__( - cls, - base_factor=returns, - target=returns[target], - correlation_length=correlation_length, - mask=mask, - ) - - -class RollingLinearRegressionOfReturns(RollingLinearRegression): - """Perform an ordinary least-squares regression predicting the returns of all - other assets on the given asset. - - Parameters - ---------- - target : ziplime.assets.Asset - The asset to regress against all other assets. - returns_length : int >= 2 - Length of the lookback window over which to compute returns. Daily - returns require a window length of 2. - regression_length : int >= 1 - Length of the lookback window over which to compute each regression. - mask : ziplime.pipeline.Filter, optional - A Filter describing which assets should be regressed against the target - asset each day. - - Notes - ----- - Computing this factor over many assets can be time consuming. It is - recommended that a mask be used in order to limit the number of assets over - which regressions are computed. - - This factor is designed to return five outputs: - - - alpha, a factor that computes the intercepts of each regression. - - beta, a factor that computes the slopes of each regression. - - r_value, a factor that computes the correlation coefficient of each - regression. - - p_value, a factor that computes, for each regression, the two-sided - p-value for a hypothesis test whose null hypothesis is that the slope is - zero. 
- - stderr, a factor that computes the standard error of the estimate of each - regression. - - For more help on factors with multiple outputs, see - :class:`ziplime.pipeline.CustomFactor`. - - Examples - -------- - Let the following be example 10-day returns for three different assets:: - - SPY MSFT FB - 2017-03-13 -.03 .03 .04 - 2017-03-14 -.02 -.03 .02 - 2017-03-15 -.01 .02 .01 - 2017-03-16 0 -.02 .01 - 2017-03-17 .01 .04 -.01 - 2017-03-20 .02 -.03 -.02 - 2017-03-21 .03 .01 -.02 - 2017-03-22 .04 -.02 -.02 - - Suppose we are interested in predicting each stock's returns from SPY's - over rolling 5-day look back windows. We can compute rolling regression - coefficients (alpha and beta) from 2017-03-17 to 2017-03-22 by doing:: - - regression_factor = RollingRegressionOfReturns( - target=sid(8554), - returns_length=10, - regression_length=5, - ) - alpha = regression_factor.alpha - beta = regression_factor.beta - - The result of computing ``alpha`` from 2017-03-17 to 2017-03-22 gives:: - - SPY MSFT FB - 2017-03-17 0 .011 .003 - 2017-03-20 0 -.004 .004 - 2017-03-21 0 .007 .006 - 2017-03-22 0 .002 .008 - - And the result of computing ``beta`` from 2017-03-17 to 2017-03-22 gives:: - - SPY MSFT FB - 2017-03-17 1 .3 -1.1 - 2017-03-20 1 .2 -1 - 2017-03-21 1 -.3 -1 - 2017-03-22 1 -.3 -.9 - - Note that SPY's column for alpha is all 0's and for beta is all 1's, as the - regression line of SPY with itself is simply the function y = x. - - To understand how each of the other values were calculated, take for - example MSFT's ``alpha`` and ``beta`` values on 2017-03-17 (.011 and .3, - respectively). These values are the result of running a linear regression - predicting MSFT's returns from SPY's returns, using values starting at - 2017-03-17 and looking back 5 days. That is, the regression was run with - x = [-.03, -.02, -.01, 0, .01] and y = [.03, -.03, .02, -.02, .04], and it - produced a slope of .3 and an intercept of .011. 
- - See Also - -------- - :class:`ziplime.pipeline.factors.RollingPearsonOfReturns` - :class:`ziplime.pipeline.factors.RollingSpearmanOfReturns` - """ - - window_safe = True - - def __new__(cls, target, returns_length, regression_length, mask=None): - # Use the `SingleAsset` filter here because it protects against - # inputting a non-existent target asset. - returns = Returns( - window_length=returns_length, - mask=(AssetExists() | SingleAsset(asset=target)), - ) - return super(RollingLinearRegressionOfReturns, cls).__new__( - cls, - dependent=returns, - independent=returns[target], - regression_length=regression_length, - mask=mask, - ) - - -class SimpleBeta(CustomFactor, StandardOutputs): - """Factor producing the slope of a regression line between each asset's daily - returns to the daily returns of a single "target" asset. - - Parameters - ---------- - target : ziplime.Asset - Asset against which other assets should be regressed. - regression_length : int - Number of days of daily returns to use for the regression. - allowed_missing_percentage : float, optional - Percentage of returns observations (between 0 and 1) that are allowed - to be missing when calculating betas. Assets with more than this - percentage of returns observations missing will produce values of - NaN. Default behavior is that 25% of inputs can be missing. 
- """ - - window_safe = True - dtype = float64_dtype - params = ("allowed_missing_count",) - - def __new__(cls, target: Asset, regression_length: int, allowed_missing_percentage: int | float = 0.25): - if regression_length < 3: - raise ValueError("regression_length must be greater than or equal to 3") - if allowed_missing_percentage <= 0.0 or allowed_missing_percentage > 1.0: - raise ValueError("allowed_missing_percentage must be between 0.0 and 1.0") - daily_returns = Returns( - window_length=2, - mask=(AssetExists() | SingleAsset(asset=target)), - ) - allowed_missing_count = int(allowed_missing_percentage * regression_length) - return super(SimpleBeta, cls).__new__( - cls, - inputs=[daily_returns, daily_returns[target]], - window_length=regression_length, - allowed_missing_count=allowed_missing_count, - ) - - def compute( - self, today, assets, out, all_returns, target_returns, allowed_missing_count - ): - vectorized_beta( - dependents=all_returns, - independent=target_returns, - allowed_missing=allowed_missing_count, - out=out, - ) - - def graph_repr(self): - return "{}({!r}, {}, {})".format( - type(self).__name__, - str(self.target.symbol), # coerce from unicode to str in py2. - self.window_length, - self.params["allowed_missing_count"], - ) - - @property - def target(self): - """Get the target of the beta calculation.""" - return self.inputs[1].asset - - def __repr__(self): - return "{}({}, length={}, allowed_missing={})".format( - type(self).__name__, - self.target, - self.window_length, - self.params["allowed_missing_count"], - ) - - -def vectorized_beta(dependents, independent, allowed_missing, out=None): - """Compute slopes of linear regressions between columns of ``dependents`` and - ``independent``. - - Parameters - ---------- - dependents : np.array[N, M] - Array with columns of data to be regressed against ``independent``. 
- independent : np.array[N, 1] - Independent variable of the regression - allowed_missing : int - Number of allowed missing (NaN) observations per column. Columns with - more than this many non-nan observations in either ``dependents`` or - ``independents`` will output NaN as the regression coefficient. - out : np.array[M] or None, optional - Output array into which to write results. If None, a new array is - created and returned. - - Returns - ------- - slopes : np.array[M] - Linear regression coefficients for each column of ``dependents``. - """ - # Cache these as locals since we're going to call them multiple times. - nan = np.nan - isnan = np.isnan - N, M = dependents.shape - - if out is None: - out = np.full(M, nan) - - # Copy N times as a column vector and fill with nans to have the same - # missing value pattern as the dependent variable. - # - # PERF_TODO: We could probably avoid the space blowup by doing this in - # Cython. - - # shape: (N, M) - independent = np.where( - isnan(dependents), - nan, - independent, - ) - - # Calculate beta as Cov(X, Y) / Cov(X, X). - # https://en.wikipedia.org/wiki/Simple_linear_regression#Fitting_the_regression_line # noqa - # - # NOTE: The usual formula for covariance is:: - # - # mean((X - mean(X)) * (Y - mean(Y))) - # - # However, we don't actually need to take the mean of both sides of the - # product, because of the folllowing equivalence:: - # - # Let X_res = (X - mean(X)). - # We have: - # - # mean(X_res * (Y - mean(Y))) = mean(X_res * (Y - mean(Y))) - # (1) = mean((X_res * Y) - (X_res * mean(Y))) - # (2) = mean(X_res * Y) - mean(X_res * mean(Y)) - # (3) = mean(X_res * Y) - mean(X_res) * mean(Y) - # (4) = mean(X_res * Y) - 0 * mean(Y) - # (5) = mean(X_res * Y) - # - # - # The tricky step in the above derivation is step (4). We know that - # mean(X_res) is zero because, for any X: - # - # mean(X - mean(X)) = mean(X) - mean(X) = 0. 
- # - # The upshot of this is that we only have to center one of `independent` - # and `dependent` when calculating covariances. Since we need the centered - # `independent` to calculate its variance in the next step, we choose to - # center `independent`. - - # shape: (N, M) - ind_residual = independent - nanmean(independent, axis=0) - - # shape: (M,) - covariances = nanmean(ind_residual * dependents, axis=0) - - # We end up with different variances in each column here because each - # column may have a different subset of the data dropped due to missing - # data in the corresponding dependent column. - # shape: (M,) - independent_variances = nanmean(ind_residual ** 2, axis=0) - - # shape: (M,) - np.divide(covariances, independent_variances, out=out) - - # Write nans back to locations where we have more then allowed number of - # missing entries. - nanlocs = isnan(independent).sum(axis=0) > allowed_missing - out[nanlocs] = nan - - return out - - -def vectorized_pearson_r(dependents, independents, allowed_missing, out=None): - """Compute Pearson's r between columns of ``dependents`` and ``independents``. - - Parameters - ---------- - dependents : np.array[N, M] - Array with columns of data to be regressed against ``independent``. - independents : np.array[N, M] or np.array[N, 1] - Independent variable(s) of the regression. If a single column is - passed, it is broadcast to the shape of ``dependents``. - allowed_missing : int - Number of allowed missing (NaN) observations per column. Columns with - more than this many non-nan observations in either ``dependents`` or - ``independents`` will output NaN as the correlation coefficient. - out : np.array[M] or None, optional - Output array into which to write results. If None, a new array is - created and returned. - - Returns - ------- - correlations : np.array[M] - Pearson correlation coefficients for each column of ``dependents``. 
- - See Also - -------- - :class:`ziplime.pipeline.factors.RollingPearson` - :class:`ziplime.pipeline.factors.RollingPearsonOfReturns` - """ - nan = np.nan - isnan = np.isnan - N, M = dependents.shape - - if out is None: - out = np.full(M, nan) - - if allowed_missing > 0: - # If we're handling nans robustly, we need to mask both arrays to - # locations where either was nan. - either_nan = isnan(dependents) | isnan(independents) - independents = np.where(either_nan, nan, independents) - dependents = np.where(either_nan, nan, dependents) - mean = nanmean - else: - # Otherwise, we can just use mean, which will give us a nan for any - # column where there's ever a nan. - mean = np.mean - - # Pearson R is Cov(X, Y) / StdDev(X) * StdDev(Y) - # c.f. https://en.wikipedia.org/wiki/Pearson_correlation_coefficient - ind_residual = independents - mean(independents, axis=0) - dep_residual = dependents - mean(dependents, axis=0) - - ind_variance = mean(ind_residual ** 2, axis=0) - dep_variance = mean(dep_residual ** 2, axis=0) - - covariances = mean(ind_residual * dep_residual, axis=0) - - evaluate( - "where(mask, nan, cov / sqrt(ind_variance * dep_variance))", - local_dict={ - "cov": covariances, - "mask": isnan(independents).sum(axis=0) > allowed_missing, - "nan": np.nan, - "ind_variance": ind_variance, - "dep_variance": dep_variance, - }, - global_dict={}, - out=out, - ) - return out diff --git a/ziplime/pipeline/factors/technical.py b/ziplime/pipeline/factors/technical.py deleted file mode 100644 index fbcf4b054e..0000000000 --- a/ziplime/pipeline/factors/technical.py +++ /dev/null @@ -1,388 +0,0 @@ -""" -Technical Analysis Factors --------------------------- -""" - -from numpy import ( - abs, - average, - clip, - diff, - dstack, - inf, -) -from numexpr import evaluate - -from ziplime.pipeline.data import EquityPricing -from ziplime.pipeline.factors import CustomFactor -from ziplime.pipeline.mixins import SingleInputMixin -from ziplime.utils.math_utils import ( - nanargmax, 
- nanargmin, - nanmax, - nanmean, - nanstd, - nanmin, -) -from ziplime.utils.numpy_utils import rolling_window - -from .basic import exponential_weights -from .basic import ( # noqa reexport - # These are re-exported here for backwards compatibility with the old - # definition site. - LinearWeightedMovingAverage, - MaxDrawdown, - SimpleMovingAverage, - VWAP, - WeightedAverageValue, -) - - -class RSI(SingleInputMixin, CustomFactor): - """ - Relative Strength Index - - **Default Inputs**: :data:`ziplime.pipeline.data.EquityPricing.close` - - **Default Window Length**: 15 - """ - - window_length = 15 - inputs = (EquityPricing.close,) - window_safe = True - - def compute(self, today, assets, out, closes): - diffs = diff(closes, axis=0) - ups = nanmean(clip(diffs, 0, inf), axis=0) - downs = abs(nanmean(clip(diffs, -inf, 0), axis=0)) - return evaluate( - "100 - (100 / (1 + (ups / downs)))", - local_dict={"ups": ups, "downs": downs}, - global_dict={}, - out=out, - ) - - -class BollingerBands(CustomFactor): - """ - Bollinger Bands technical indicator. - https://en.wikipedia.org/wiki/Bollinger_Bands - - **Default Inputs:** :data:`ziplime.pipeline.data.EquityPricing.close` - - Parameters - ---------- - inputs : length-1 iterable[BoundColumn] - The expression over which to compute bollinger bands. - window_length : int > 0 - Length of the lookback window over which to compute the bollinger - bands. - k : float - The number of standard deviations to add or subtract to create the - upper and lower bands. - """ - - params = ("k",) - inputs = (EquityPricing.close,) - outputs = "lower", "middle", "upper" - - def compute(self, today, assets, out, close, k): - difference = k * nanstd(close, axis=0) - out.middle = middle = nanmean(close, axis=0) - out.upper = middle + difference - out.lower = middle - difference - - -class Aroon(CustomFactor): - """ - Aroon technical indicator. 
- https://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/aroon-indicator - - **Defaults Inputs:** :data:`ziplime.pipeline.data.EquityPricing.low`, \ - :data:`ziplime.pipeline.data.EquityPricing.high` - - Parameters - ---------- - window_length : int > 0 - Length of the lookback window over which to compute the Aroon - indicator. - """ # noqa - - inputs = (EquityPricing.low, EquityPricing.high) - outputs = ("down", "up") - - def compute(self, today, assets, out, lows, highs): - wl = self.window_length - high_date_index = nanargmax(highs, axis=0) - low_date_index = nanargmin(lows, axis=0) - evaluate( - "(100 * high_date_index) / (wl - 1)", - local_dict={ - "high_date_index": high_date_index, - "wl": wl, - }, - out=out.up, - ) - evaluate( - "(100 * low_date_index) / (wl - 1)", - local_dict={ - "low_date_index": low_date_index, - "wl": wl, - }, - out=out.down, - ) - - -class FastStochasticOscillator(CustomFactor): - """ - Fast Stochastic Oscillator Indicator [%K, Momentum Indicator] - https://wiki.timetotrade.eu/Stochastic - - This stochastic is considered volatile, and varies a lot when used in - market analysis. It is recommended to use the slow stochastic oscillator - or a moving average of the %K [%D]. 
- - **Default Inputs:** :data:`ziplime.pipeline.data.EquityPricing.close`, \ - :data:`ziplime.pipeline.data.EquityPricing.low`, \ - :data:`ziplime.pipeline.data.EquityPricing.high` - - **Default Window Length:** 14 - - Returns - ------- - out: %K oscillator - """ - - inputs = (EquityPricing.close, EquityPricing.low, EquityPricing.high) - window_safe = True - window_length = 14 - - def compute(self, today, assets, out, closes, lows, highs): - - highest_highs = nanmax(highs, axis=0) - lowest_lows = nanmin(lows, axis=0) - today_closes = closes[-1] - - evaluate( - "((tc - ll) / (hh - ll)) * 100", - local_dict={ - "tc": today_closes, - "ll": lowest_lows, - "hh": highest_highs, - }, - global_dict={}, - out=out, - ) - - -class IchimokuKinkoHyo(CustomFactor): - """Compute the various metrics for the Ichimoku Kinko Hyo (Ichimoku Cloud). - http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:ichimoku_cloud - - **Default Inputs:** :data:`ziplime.pipeline.data.EquityPricing.high`, \ - :data:`ziplime.pipeline.data.EquityPricing.low`, \ - :data:`ziplime.pipeline.data.EquityPricing.close` - - **Default Window Length:** 52 - - Parameters - ---------- - window_length : int > 0 - The length the the window for the senkou span b. - tenkan_sen_length : int >= 0, <= window_length - The length of the window for the tenkan-sen. - kijun_sen_length : int >= 0, <= window_length - The length of the window for the kijou-sen. - chikou_span_length : int >= 0, <= window_length - The lag for the chikou span. 
- """ # noqa - - params = { - "tenkan_sen_length": 9, - "kijun_sen_length": 26, - "chikou_span_length": 26, - } - inputs = (EquityPricing.high, EquityPricing.low, EquityPricing.close) - outputs = ( - "tenkan_sen", - "kijun_sen", - "senkou_span_a", - "senkou_span_b", - "chikou_span", - ) - window_length = 52 - - def _validate(self): - super(IchimokuKinkoHyo, self)._validate() - for k, v in self.params.items(): - if v > self.window_length: - raise ValueError( - "%s must be <= the window_length: %s > %s" - % ( - k, - v, - self.window_length, - ), - ) - - def compute( - self, - today, - assets, - out, - high, - low, - close, - tenkan_sen_length, - kijun_sen_length, - chikou_span_length, - ): - - out.tenkan_sen = tenkan_sen = ( - high[-tenkan_sen_length:].max(axis=0) + low[-tenkan_sen_length:].min(axis=0) - ) / 2 - out.kijun_sen = kijun_sen = ( - high[-kijun_sen_length:].max(axis=0) + low[-kijun_sen_length:].min(axis=0) - ) / 2 - out.senkou_span_a = (tenkan_sen + kijun_sen) / 2 - out.senkou_span_b = (high.max(axis=0) + low.min(axis=0)) / 2 - out.chikou_span = close[chikou_span_length] - - -class RateOfChangePercentage(CustomFactor): - """ - Rate of change Percentage - ROC measures the percentage change in price from one period to the next. - The ROC calculation compares the current price with the price `n` - periods ago. - Formula for calculation: ((price - prevPrice) / prevPrice) * 100 - price - the current price - prevPrice - the price n days ago, equals window length - """ - - def compute(self, today, assets, out, close): - today_close = close[-1] - prev_close = close[0] - evaluate( - "((tc - pc) / pc) * 100", - local_dict={"tc": today_close, "pc": prev_close}, - global_dict={}, - out=out, - ) - - -class TrueRange(CustomFactor): - """ - True Range - - A technical indicator originally developed by J. Welles Wilder, Jr. - Indicates the true degree of daily price change in an underlying. 
- - **Default Inputs:** :data:`ziplime.pipeline.data.EquityPricing.high`, \ - :data:`ziplime.pipeline.data.EquityPricing.low`, \ - :data:`ziplime.pipeline.data.EquityPricing.close` - - **Default Window Length:** 2 - """ - - inputs = ( - EquityPricing.high, - EquityPricing.low, - EquityPricing.close, - ) - window_length = 2 - - def compute(self, today, assets, out, highs, lows, closes): - high_to_low = highs[1:] - lows[1:] - high_to_prev_close = abs(highs[1:] - closes[:-1]) - low_to_prev_close = abs(lows[1:] - closes[:-1]) - out[:] = nanmax( - dstack( - ( - high_to_low, - high_to_prev_close, - low_to_prev_close, - ) - ), - 2, - ) - - -class MovingAverageConvergenceDivergenceSignal(CustomFactor): - """ - Moving Average Convergence/Divergence (MACD) Signal line - https://en.wikipedia.org/wiki/MACD - - A technical indicator originally developed by Gerald Appel in the late - 1970's. MACD shows the relationship between two moving averages and - reveals changes in the strength, direction, momentum, and duration of a - trend in a stock's price. - - **Default Inputs:** :data:`ziplime.pipeline.data.EquityPricing.close` - - Parameters - ---------- - fast_period : int > 0, optional - The window length for the "fast" EWMA. Default is 12. - slow_period : int > 0, > fast_period, optional - The window length for the "slow" EWMA. Default is 26. - signal_period : int > 0, < fast_period, optional - The window length for the signal line. Default is 9. - - Notes - ----- - Unlike most pipeline expressions, this factor does not accept a - ``window_length`` parameter. ``window_length`` is inferred from - ``slow_period`` and ``signal_period``. - """ - - inputs = (EquityPricing.close,) - # We don't use the default form of `params` here because we want to - # dynamically calculate `window_length` from the period lengths in our - # __new__. 
- params = ("fast_period", "slow_period", "signal_period") - - def __new__(cls, fast_period=12, slow_period=26, signal_period=9, *args, **kwargs): - if fast_period < 1: - raise ValueError("`fast_period` must be >= 1") - if slow_period < 1: - raise ValueError("`slow_period` must be >= 1") - if signal_period < 1: - raise ValueError("`signal_period` must be >= 1") - - if slow_period <= fast_period: - raise ValueError( - "'slow_period' must be greater than 'fast_period', but got\n" - "slow_period={slow}, fast_period={fast}".format( - slow=slow_period, - fast=fast_period, - ) - ) - - return super(MovingAverageConvergenceDivergenceSignal, cls).__new__( - cls, - fast_period=fast_period, - slow_period=slow_period, - signal_period=signal_period, - window_length=slow_period + signal_period - 1, - *args, - **kwargs, - ) - - def _ewma(self, data, length): - decay_rate = 1.0 - (2.0 / (1.0 + length)) - return average(data, axis=1, weights=exponential_weights(length, decay_rate)) - - def compute( - self, today, assets, out, close, fast_period, slow_period, signal_period - ): - slow_EWMA = self._ewma(rolling_window(close, slow_period), slow_period) - fast_EWMA = self._ewma( - rolling_window(close, fast_period)[-signal_period:], fast_period - ) - macd = fast_EWMA - slow_EWMA - out[:] = self._ewma(macd.T, signal_period) - - -# Convenience aliases. 
-MACDSignal = MovingAverageConvergenceDivergenceSignal diff --git a/ziplime/pipeline/filters/__init__.py b/ziplime/pipeline/filters/__init__.py deleted file mode 100644 index 192c494e41..0000000000 --- a/ziplime/pipeline/filters/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -from .filter import ( - AllPresent, - ArrayPredicate, - CustomFilter, - Filter, - Latest, - MaximumFilter, - NotNullFilter, - NullFilter, - NumExprFilter, - PercentileFilter, - SingleAsset, - StaticAssets, - StaticSids, -) -from .smoothing import All, Any, AtLeastN - -__all__ = [ - "All", - "AllPresent", - "Any", - "ArrayPredicate", - "AtLeastN", - "CustomFilter", - "Filter", - "Latest", - "MaximumFilter", - "NotNullFilter", - "NullFilter", - "NumExprFilter", - "PercentileFilter", - "SingleAsset", - "StaticAssets", - "StaticSids", -] diff --git a/ziplime/pipeline/filters/filter.py b/ziplime/pipeline/filters/filter.py deleted file mode 100644 index 3c2fadce34..0000000000 --- a/ziplime/pipeline/filters/filter.py +++ /dev/null @@ -1,762 +0,0 @@ -""" -filter.py -""" - -from itertools import chain -from operator import attrgetter - -from numpy import ( - any as np_any, - float64, - nan, - nanpercentile, - uint8, -) - -from ziplime.errors import ( - BadPercentileBounds, - NonExistentAssetInTimeFrame, - UnsupportedDataType, -) -from ziplime.lib.labelarray import LabelArray -from ziplime.lib.rank import is_missing, grouped_masked_is_maximal -from ziplime.pipeline.dtypes import ( - CLASSIFIER_DTYPES, - FACTOR_DTYPES, - FILTER_DTYPES, -) -from ziplime.pipeline.expression import ( - BadBinaryOperator, - FILTER_BINOPS, - method_name_for_op, - NumericalExpression, -) -from ziplime.pipeline.mixins import ( - CustomTermMixin, - IfElseMixin, - LatestMixin, - PositiveWindowLengthMixin, - RestrictedDTypeMixin, - SingleInputMixin, - StandardOutputs, -) -from ziplime.pipeline.term import ComputableTerm, Term -from ziplime.utils.numpy_utils import ( - same, - bool_dtype, - int64_dtype, - repeat_first_axis, -) - - -def 
concat_tuples(*tuples): - """ - Concatenate a sequence of tuples into one tuple. - """ - return tuple(chain(*tuples)) - - -def binary_operator(op): - """ - Factory function for making binary operator methods on a Filter subclass. - - Returns a function "binary_operator" suitable for implementing functions - like __and__ or __or__. - """ - # When combining a Filter with a NumericalExpression, we use this - # attrgetter instance to defer to the commuted interpretation of the - # NumericalExpression operator. - commuted_method_getter = attrgetter(method_name_for_op(op, commute=True)) - - def binary_operator(self, other): - if isinstance(self, NumericalExpression): - self_expr, other_expr, new_inputs = self.build_binary_op( - op, - other, - ) - return NumExprFilter.create( - "({left}) {op} ({right})".format( - left=self_expr, - op=op, - right=other_expr, - ), - new_inputs, - ) - elif isinstance(other, NumericalExpression): - # NumericalExpression overrides numerical ops to correctly handle - # merging of inputs. Look up and call the appropriate - # right-binding operator with ourself as the input. - return commuted_method_getter(other)(self) - elif isinstance(other, Term): - if other.dtype != bool_dtype: - raise BadBinaryOperator(op, self, other) - if self is other: - return NumExprFilter.create( - "x_0 {op} x_0".format(op=op), - (self,), - ) - return NumExprFilter.create( - "x_0 {op} x_1".format(op=op), - (self, other), - ) - elif isinstance(other, int): # Note that this is true for bool as well - return NumExprFilter.create( - "x_0 {op} {constant}".format(op=op, constant=int(other)), - binds=(self,), - ) - raise BadBinaryOperator(op, self, other) - - binary_operator.__doc__ = "Binary Operator: '%s'" % op - return binary_operator - - -def unary_operator(op): - """ - Factory function for making unary operator methods for Filters. - """ - valid_ops = {"~"} - if op not in valid_ops: - raise ValueError("Invalid unary operator %s." 
% op) - - def unary_operator(self): - # This can't be hoisted up a scope because the types returned by - # unary_op_return_type aren't defined when the top-level function is - # invoked. - if isinstance(self, NumericalExpression): - return NumExprFilter.create( - "{op}({expr})".format(op=op, expr=self._expr), - self.inputs, - ) - else: - return NumExprFilter.create("{op}x_0".format(op=op), (self,)) - - unary_operator.__doc__ = "Unary Operator: '%s'" % op - return unary_operator - - -class Filter(RestrictedDTypeMixin, ComputableTerm): - """ - Pipeline expression computing a boolean output. - - Filters are most commonly useful for describing sets of assets to include - or exclude for some particular purpose. Many Pipeline API functions accept - a ``mask`` argument, which can be supplied a Filter indicating that only - values passing the Filter should be considered when performing the - requested computation. For example, :meth:`ziplime.pipeline.Factor.top` - accepts a mask indicating that ranks should be computed only on assets that - passed the specified Filter. - - The most common way to construct a Filter is via one of the comparison - operators (``<``, ``<=``, ``!=``, ``eq``, ``>``, ``>=``) of - :class:`~ziplime.pipeline.Factor`. For example, a natural way to construct - a Filter for stocks with a 10-day VWAP less than $20.0 is to first - construct a Factor computing 10-day VWAP and compare it to the scalar value - 20.0:: - - >>> from ziplime.pipeline.factors import VWAP - >>> vwap_10 = VWAP(window_length=10) - >>> vwaps_under_20 = (vwap_10 <= 20) - - Filters can also be constructed via comparisons between two Factors. For - example, to construct a Filter producing True for asset/date pairs where - the asset's 10-day VWAP was greater than it's 30-day VWAP:: - - >>> short_vwap = VWAP(window_length=10) - >>> long_vwap = VWAP(window_length=30) - >>> higher_short_vwap = (short_vwap > long_vwap) - - Filters can be combined via the ``&`` (and) and ``|`` (or) operators. 
- - ``&``-ing together two filters produces a new Filter that produces True if - **both** of the inputs produced True. - - ``|``-ing together two filters produces a new Filter that produces True if - **either** of its inputs produced True. - - The ``~`` operator can be used to invert a Filter, swapping all True values - with Falses and vice-versa. - - Filters may be set as the ``screen`` attribute of a Pipeline, indicating - asset/date pairs for which the filter produces False should be excluded - from the Pipeline's output. This is useful both for reducing noise in the - output of a Pipeline and for reducing memory consumption of Pipeline - results. - """ - - # Filters are window-safe by default, since a yes/no decision means the - # same thing from all temporal perspectives. - window_safe = True - - # Used by RestrictedDTypeMixin - ALLOWED_DTYPES = FILTER_DTYPES - dtype = bool_dtype - - clsdict = locals() - clsdict.update( - {method_name_for_op(op): binary_operator(op) for op in FILTER_BINOPS} - ) - clsdict.update( - { - method_name_for_op(op, commute=True): binary_operator(op) - for op in FILTER_BINOPS - } - ) - - __invert__ = unary_operator("~") - - def _validate(self): - # Run superclass validation first so that we handle `dtype not passed` - # before this. - retval = super(Filter, self)._validate() - if self.dtype != bool_dtype: - raise UnsupportedDataType(typename=type(self).__name__, dtype=self.dtype) - return retval - - @classmethod - def _principal_computable_term_type(cls): - return Filter - - def if_else(self, if_true: ComputableTerm, if_false: ComputableTerm): - """ - Create a term that selects values from one of two choices. - - Parameters - ---------- - if_true : ziplime.pipeline.term.ComputableTerm - Expression whose values should be used at locations where this - filter outputs True. - if_false : ziplime.pipeline.term.ComputableTerm - Expression whose values should be used at locations where this - filter outputs False. 
- - Returns - ------- - merged : ziplime.pipeline.term.ComputableTerm - A term that computes by taking values from either ``if_true`` or - ``if_false``, depending on the values produced by ``self``. - - The returned term draws from``if_true`` at locations where ``self`` - produces True, and it draws from ``if_false`` at locations where - ``self`` produces False. - - Example - ------- - - Let ``f`` be a Factor that produces the following output:: - - AAPL MSFT MCD BK - 2017-03-13 1.0 2.0 3.0 4.0 - 2017-03-14 5.0 6.0 7.0 8.0 - - Let ``g`` be another Factor that produces the following output:: - - AAPL MSFT MCD BK - 2017-03-13 10.0 20.0 30.0 40.0 - 2017-03-14 50.0 60.0 70.0 80.0 - - Finally, let ``condition`` be a Filter that produces the following - output:: - - AAPL MSFT MCD BK - 2017-03-13 True False True False - 2017-03-14 True True False False - - Then, the expression ``condition.if_else(f, g)`` produces the following - output:: - - AAPL MSFT MCD BK - 2017-03-13 1.0 20.0 3.0 40.0 - 2017-03-14 5.0 6.0 70.0 80.0 - - See Also - -------- - numpy.where - Factor.fillna - """ - true_type = if_true._principal_computable_term_type() - false_type = if_false._principal_computable_term_type() - - if true_type is not false_type: - raise TypeError( - "Mismatched types in if_else(): if_true={}, but if_false={}".format( - true_type.__name__, false_type.__name__ - ) - ) - - if if_true.dtype != if_false.dtype: - raise TypeError( - "Mismatched dtypes in if_else(): " - "if_true.dtype = {}, if_false.dtype = {}".format( - if_true.dtype, if_false.dtype - ) - ) - - if if_true.outputs != if_false.outputs: - raise ValueError( - "Mismatched outputs in if_else(): " - "if_true.outputs = {}, if_false.outputs = {}".format( - if_true.outputs, if_false.outputs - ), - ) - - if not same(if_true.missing_value, if_false.missing_value): - raise ValueError( - "Mismatched missing values in if_else(): " - "if_true.missing_value = {!r}, if_false.missing_value = {!r}".format( - if_true.missing_value, 
if_false.missing_value - ) - ) - - return_type = type(if_true)._with_mixin(IfElseMixin) - - return return_type( - condition=self, - if_true=if_true, - if_false=if_false, - ) - - -class NumExprFilter(NumericalExpression, Filter): - """ - A Filter computed from a numexpr expression. - """ - - @classmethod - def create(cls, expr, binds): - """ - Helper for creating new NumExprFactors. - - This is just a wrapper around NumericalExpression.__new__ that always - forwards `bool` as the dtype, since Filters can only be of boolean - dtype. - """ - return cls(expr=expr, binds=binds, dtype=bool_dtype) - - def _compute(self, arrays, dates, assets, mask): - """ - Compute our result with numexpr, then re-apply `mask`. - """ - return ( - super(NumExprFilter, self)._compute( - arrays, - dates, - assets, - mask, - ) - & mask - ) - - -class NullFilter(SingleInputMixin, Filter): - """ - A Filter indicating whether input values are missing from an input. - - Parameters - ---------- - factor : ziplime.pipeline.Term - The factor to compare against its missing_value. - """ - - window_length = 0 - - def __new__(cls, term): - return super(NullFilter, cls).__new__( - cls, - inputs=(term,), - ) - - def _compute(self, arrays, dates, assets, mask): - data = arrays[0] - if isinstance(data, LabelArray): - return data.is_missing() - return is_missing(arrays[0], self.inputs[0].missing_value) - - -class NotNullFilter(SingleInputMixin, Filter): - """ - A Filter indicating whether input values are **not** missing from an input. - - Parameters - ---------- - factor : ziplime.pipeline.Term - The factor to compare against its missing_value. 
- """ - - window_length = 0 - - def __new__(cls, term): - return super(NotNullFilter, cls).__new__( - cls, - inputs=(term,), - ) - - def _compute(self, arrays, dates, assets, mask): - data = arrays[0] - if isinstance(data, LabelArray): - return ~data.is_missing() - return ~is_missing(arrays[0], self.inputs[0].missing_value) - - -class PercentileFilter(SingleInputMixin, Filter): - """ - A Filter representing assets falling between percentile bounds of a Factor. - - Parameters - ---------- - factor : ziplime.pipeline.factor.Factor - The factor over which to compute percentile bounds. - min_percentile : float [0.0, 1.0] - The minimum percentile rank of an asset that will pass the filter. - max_percentile : float [0.0, 1.0] - The maxiumum percentile rank of an asset that will pass the filter. - """ - - window_length = 0 - - def __new__(cls, factor, min_percentile, max_percentile, mask): - return super(PercentileFilter, cls).__new__( - cls, - inputs=(factor,), - mask=mask, - min_percentile=min_percentile, - max_percentile=max_percentile, - ) - - def _init(self, min_percentile, max_percentile, *args, **kwargs): - self._min_percentile = min_percentile - self._max_percentile = max_percentile - return super(PercentileFilter, self)._init(*args, **kwargs) - - @classmethod - def _static_identity(cls, min_percentile, max_percentile, *args, **kwargs): - return ( - super(PercentileFilter, cls)._static_identity(*args, **kwargs), - min_percentile, - max_percentile, - ) - - def _validate(self): - """ - Ensure that our percentile bounds are well-formed. - """ - if not 0.0 <= self._min_percentile < self._max_percentile <= 100.0: - raise BadPercentileBounds( - min_percentile=self._min_percentile, - max_percentile=self._max_percentile, - upper_bound=100.0, - ) - return super(PercentileFilter, self)._validate() - - def _compute(self, arrays, dates, assets, mask): - """ - For each row in the input, compute a mask of all values falling between - the given percentiles. 
- """ - # TODO: Review whether there's a better way of handling small numbers - # of columns. - data = arrays[0].copy().astype(float64) - data[~mask] = nan - - # FIXME: np.nanpercentile **should** support computing multiple bounds - # at once, but there's a bug in the logic for multiple bounds in numpy - # 1.9.2. It will be fixed in 1.10. - # c.f. https://github.com/numpy/numpy/pull/5981 - lower_bounds = nanpercentile( - data, - self._min_percentile, - axis=1, - keepdims=True, - ) - upper_bounds = nanpercentile( - data, - self._max_percentile, - axis=1, - keepdims=True, - ) - return (lower_bounds <= data) & (data <= upper_bounds) - - def graph_repr(self): - # Graphviz interprets `\l` as "divide label into lines, left-justified" - return "{}:\\l min: {}, max: {}\\l".format( - type(self).__name__, - self._min_percentile, - self._max_percentile, - ) - - -class CustomFilter(PositiveWindowLengthMixin, CustomTermMixin, Filter): - """ - Base class for user-defined Filters. - - Parameters - ---------- - inputs : iterable, optional - An iterable of `BoundColumn` instances (e.g. USEquityPricing.close), - describing the data to load and pass to ``self.compute``. If this - argument is passed to the CustomFilter constructor, we look for a - class-level attribute named ``inputs``. - window_length : int, optional - Number of rows to pass for each input. If this argument is not passed - to the CustomFilter constructor, we look for a class-level attribute - named `window_length`. - - Notes - ----- - Users implementing their own Filters should subclass CustomFilter and - implement a method named ``compute`` with the following signature: - - .. code-block:: python - - def compute(self, today, assets, out, *inputs): - ... - - On each simulation date, ``compute`` will be called with the current date, - an array of sids, an output array, and an input array for each expression - passed as inputs to the CustomFilter constructor. 
- - The specific types of the values passed to ``compute`` are as follows:: - - today : np.datetime64[ns] - Row label for the last row of all arrays passed as `inputs`. - assets : np.array[int64, ndim=1] - Column labels for `out` and`inputs`. - out : np.array[bool, ndim=1] - Output array of the same shape as `assets`. `compute` should write - its desired return values into `out`. - *inputs : tuple of np.array - Raw data arrays corresponding to the values of `self.inputs`. - - See the documentation for - :class:`~ziplime.pipeline.CustomFactor` for more details on - implementing a custom ``compute`` method. - - See Also - -------- - ziplime.pipeline.CustomFactor - """ - - def _validate(self): - try: - super(CustomFilter, self)._validate() - except UnsupportedDataType as exc: - if self.dtype in CLASSIFIER_DTYPES: - raise UnsupportedDataType( - typename=type(self).__name__, - dtype=self.dtype, - hint="Did you mean to create a CustomClassifier?", - ) from exc - elif self.dtype in FACTOR_DTYPES: - raise UnsupportedDataType( - typename=type(self).__name__, - dtype=self.dtype, - hint="Did you mean to create a CustomFactor?", - ) from exc - raise - - -class ArrayPredicate(SingleInputMixin, Filter): - """ - A filter applying a function from (ndarray, *args) -> ndarray[bool]. - - Parameters - ---------- - term : ziplime.pipeline.Term - Term producing the array over which the predicate will be computed. - op : function(ndarray, *args) -> ndarray[bool] - Function to apply to the result of `term`. - opargs : tuple[hashable] - Additional argument to apply to ``op``. - """ - - params = ("op", "opargs") - window_length = 0 - - def __new__(cls, term: Term, op, opargs: tuple): - hash(opargs) # fail fast if opargs isn't hashable. 
- return super(ArrayPredicate, cls).__new__( - ArrayPredicate, - op=op, - opargs=opargs, - inputs=(term,), - mask=term.mask, - ) - - def _compute(self, arrays, dates, assets, mask): - params = self.params - data = arrays[0] - return params["op"](data, *params["opargs"]) & mask - - def graph_repr(self): - # Graphviz interprets `\l` as "divide label into lines, left-justified" - return "{}:\\l op: {}.{}()".format( - type(self).__name__, - self.params["op"].__module__, - self.params["op"].__name__, - ) - - -class Latest(LatestMixin, CustomFilter): - """ - Filter producing the most recently-known value of `inputs[0]` on each day. - """ - - pass - - -class SingleAsset(Filter): - """ - A Filter that computes to True only for the given asset. - """ - - inputs = [] - window_length = 1 - - def __new__(cls, asset): - return super(SingleAsset, cls).__new__(cls, asset=asset) - - def _init(self, asset, *args, **kwargs): - self._asset = asset - return super(SingleAsset, self)._init(*args, **kwargs) - - @classmethod - def _static_identity(cls, asset, *args, **kwargs): - return ( - super(SingleAsset, cls)._static_identity(*args, **kwargs), - asset, - ) - - def _compute(self, arrays, dates, assets, mask): - is_my_asset = assets == self._asset.sid - out = repeat_first_axis(is_my_asset, len(mask)) - # Raise an exception if `self._asset` does not exist for the entirety - # of the timeframe over which we are computing. - if (is_my_asset.sum() != 1) or ((out & mask).sum() != len(mask)): - raise NonExistentAssetInTimeFrame( - asset=self._asset, - start_date=dates[0], - end_date=dates[-1], - ) - return out - - def graph_repr(self): - # Graphviz interprets `\l` as "divide label into lines, left-justified" - return "SingleAsset:\\l asset: {!r}\\l".format(self._asset) - - -class StaticSids(Filter): - """ - A Filter that computes True for a specific set of predetermined sids. 
- - ``StaticSids`` is mostly useful for debugging or for interactively - computing pipeline terms for a fixed set of sids that are known ahead of - time. - - Parameters - ---------- - sids : iterable[int] - An iterable of sids for which to filter. - """ - - inputs = () - window_length = 0 - params = ("sids",) - - def __new__(cls, sids): - sids = frozenset(sids) - return super(StaticSids, cls).__new__(cls, sids=sids) - - def _compute(self, arrays, dates, sids, mask): - my_columns = sids.isin(self.params["sids"]) - return repeat_first_axis(my_columns, len(mask)) & mask - - -class StaticAssets(StaticSids): - """ - A Filter that computes True for a specific set of predetermined assets. - - ``StaticAssets`` is mostly useful for debugging or for interactively - computing pipeline terms for a fixed set of assets that are known ahead of - time. - - Parameters - ---------- - assets : iterable[Asset] - An iterable of assets for which to filter. - """ - - def __new__(cls, assets): - sids = frozenset(asset.sid for asset in assets) - return super(StaticAssets, cls).__new__(cls, sids) - - -class AllPresent(CustomFilter, SingleInputMixin, StandardOutputs): - """Pipeline filter indicating input term has data for a given window.""" - - def _validate(self): - - if isinstance(self.inputs[0], Filter): - raise TypeError("Input to filter `AllPresent` cannot be a Filter.") - - return super(AllPresent, self)._validate() - - def compute(self, today, assets, out, value): - if isinstance(value, LabelArray): - out[:] = ~np_any(value.is_missing(), axis=0) - else: - out[:] = ~np_any( - is_missing(value, self.inputs[0].missing_value), - axis=0, - ) - - -class MaximumFilter(Filter, StandardOutputs): - """Pipeline filter that selects the top asset, possibly grouped and masked.""" - - window_length = 0 - - def __new__(cls, factor, groupby, mask): - if groupby is None: - from ziplime.pipeline.classifiers import Everything - - groupby = Everything() - - return super(MaximumFilter, cls).__new__( - 
cls, - inputs=(factor, groupby), - mask=mask, - ) - - def _compute(self, arrays, dates, assets, mask): - # XXX: We're doing a lot of unncessary work here if `groupby` isn't - # specified. - data = arrays[0] - group_labels, null_label = self.inputs[1]._to_integral(arrays[1]) - effective_mask = ( - mask - & (group_labels != null_label) - & ~is_missing(data, self.inputs[0].missing_value) - ).view(uint8) - - return grouped_masked_is_maximal( - # Unconditionally view the data as int64. - # This is safe because casting from float64 to int64 is an - # order-preserving operation. - data.view(int64_dtype), - # PERF: Consider supporting different sizes of group labels. - group_labels.astype(int64_dtype), - effective_mask, - ) - - def __repr__(self): - return "Maximum({}, groupby={}, mask={})".format( - self.inputs[0].recursive_repr(), - self.inputs[1].recursive_repr(), - self.mask.recursive_repr(), - ) - - def graph_repr(self): - # Graphviz interprets `\l` as "divide label into lines, left-justified" - return "Maximum:\\l groupby: {}\\l mask: {}\\l".format( - self.inputs[1].recursive_repr(), - self.mask.recursive_repr(), - ) diff --git a/ziplime/pipeline/filters/smoothing.py b/ziplime/pipeline/filters/smoothing.py deleted file mode 100644 index 086ff2281c..0000000000 --- a/ziplime/pipeline/filters/smoothing.py +++ /dev/null @@ -1,52 +0,0 @@ -""" -Filters that apply smoothing operations on other filters. - -These are generally useful for controlling/minimizing turnover on existing -Filters. -""" - -from .filter import CustomFilter - - -class All(CustomFilter): - """ - A Filter requiring that assets produce True for ``window_length`` - consecutive days. - - **Default Inputs:** None - - **Default Window Length:** None - """ - - def compute(self, today, assets, out, arg): - out[:] = arg.sum(axis=0) == self.window_length - - -class Any(CustomFilter): - """ - A Filter requiring that assets produce True for at least one day in the - last ``window_length`` days. 
- - **Default Inputs:** None - - **Default Window Length:** None - """ - - def compute(self, today, assets, out, arg): - out[:] = arg.sum(axis=0) > 0 - - -class AtLeastN(CustomFilter): - """ - A Filter requiring that assets produce True for at least N days in the - last ``window_length`` days. - - **Default Inputs:** None - - **Default Window Length:** None - """ - - params = ("N",) - - def compute(self, today, assets, out, arg, N): - out[:] = arg.sum(axis=0) >= N diff --git a/ziplime/pipeline/graph.py b/ziplime/pipeline/graph.py index 0310c828e6..0dc2f11c6c 100644 --- a/ziplime/pipeline/graph.py +++ b/ziplime/pipeline/graph.py @@ -1,13 +1,17 @@ """ Dependency-Graph representation of Pipeline API terms. """ - +import datetime import uuid import networkx as nx -from ziplime.pipeline.visualize import display_graph +import pandas as pd +from ziplime.lib.adjusted_array import AdjustedArray -from .term import LoadableTerm +from ziplime.pipeline import LoadableTerm +from ziplime.pipeline.visualize import display_graph +from ziplime.pipeline.terms.term import Term +from ziplime.pipeline.domain import Domain class CyclicDependency(Exception): @@ -267,8 +271,8 @@ class ExecutionPlan(TermGraph): offset """ - def __init__(self, domain, terms, start_date, end_date, min_extra_rows=0): - super(ExecutionPlan, self).__init__(terms) + def __init__(self, domain: Domain, terms: list[Term], start_date: datetime.date, end_date: datetime.date, min_extra_rows: int=0): + super(ExecutionPlan, self).__init__(terms=terms) # Specialize all the LoadableTerms in the graph to our domain, so that # when the engine requests an execution order, we emit the specialized @@ -282,7 +286,7 @@ def __init__(self, domain, terms, start_date, end_date, min_extra_rows=0): # lazyval, and we don't want its result to be cached until after we've # specialized. 
specializations = { - t: t.specialize(domain) for t in self.graph if isinstance(t, LoadableTerm) + t: t.specialize(domain=domain) for t in self.graph if isinstance(t, LoadableTerm) } self.graph = nx.relabel.relabel_nodes(self.graph, specializations) @@ -291,27 +295,27 @@ def __init__(self, domain, terms, start_date, end_date, min_extra_rows=0): sessions = domain.sessions() for term in terms.values(): self.set_extra_rows( - term, - sessions, - start_date, - end_date, + term=term, + all_dates=sessions, + start_date=start_date, + end_date=end_date, min_extra_rows=min_extra_rows, ) - self._assert_all_loadable_terms_specialized_to(domain) + self._assert_all_loadable_terms_specialized_to(domain=domain) - def set_extra_rows(self, term, all_dates, start_date, end_date, min_extra_rows): + def set_extra_rows(self, term: Term, all_dates: pd.DatetimeIndex, start_date: datetime.date, end_date: datetime.date, min_extra_rows: int) -> None: # Specialize any loadable terms before adding extra rows. - term = maybe_specialize(term, self.domain) + term = maybe_specialize(term=term, domain=self.domain) # A term can require that additional extra rows beyond the minimum be # computed. This is most often used with downsampled terms, which need # to ensure that the first date is a computation date. 
extra_rows_for_term = term.compute_extra_rows( - all_dates, - start_date, - end_date, - min_extra_rows, + all_dates=all_dates, + start_date=start_date, + end_date=end_date, + min_extra_rows=min_extra_rows, ) if extra_rows_for_term < min_extra_rows: raise ValueError( @@ -322,20 +326,20 @@ def set_extra_rows(self, term, all_dates, start_date, end_date, min_extra_rows): ) ) - self._ensure_extra_rows(term, extra_rows_for_term) + self._ensure_extra_rows(term=term, N=extra_rows_for_term) for dependency, additional_extra_rows in term.dependencies.items(): self.set_extra_rows( - dependency, - all_dates, - start_date, - end_date, + term=dependency, + all_dates=all_dates, + start_date=start_date, + end_date=end_date, min_extra_rows=extra_rows_for_term + additional_extra_rows, ) #@lazyval @property - def offset(self): + def offset(self) -> dict[tuple[Term, Term], int]: """ For all pairs (term, input) such that `input` is an input to `term`, compute a mapping:: @@ -404,7 +408,7 @@ def offset(self): out = {} for term in self.graph: for dep, requested_extra_rows in term.dependencies.items(): - specialized_dep = maybe_specialize(dep, self.domain) + specialized_dep = maybe_specialize(term=dep, domain=self.domain) # How much bigger is the result for dep compared to term? size_difference = extra[specialized_dep] - extra[term] @@ -418,7 +422,7 @@ def offset(self): #@lazyval @property - def extra_rows(self): + def extra_rows(self) -> dict[Term, int]: """ A dict mapping `term` -> `# of extra rows to load/compute of `term`. @@ -454,14 +458,14 @@ def extra_rows(self): return {term: self.graph.nodes[term]["extra_rows"] for term in self.graph.nodes} - def _ensure_extra_rows(self, term, N): + def _ensure_extra_rows(self, term: Term, N: int) -> None: """ Ensure that we're going to compute at least N extra rows of `term`. 
""" attrs = dict(self.graph.nodes())[term] attrs["extra_rows"] = max(N, attrs.get("extra_rows", 0)) - def mask_and_dates_for_term(self, term, root_mask_term, workspace, all_dates): + def mask_and_dates_for_term(self, term: Term, root_mask_term: Term, workspace: dict[Term, AdjustedArray], all_dates: pd.DatetimeIndex): """ Load mask and mask row labels for term. @@ -492,7 +496,7 @@ def mask_and_dates_for_term(self, term, root_mask_term, workspace, all_dates): return workspace[mask][mask_offset:], all_dates[dates_offset:] - def _assert_all_loadable_terms_specialized_to(self, domain): + def _assert_all_loadable_terms_specialized_to(self, domain: Domain) -> None: """Make sure that we've specialized all loadable terms in the graph.""" for term in self.graph.nodes(): if isinstance(term, LoadableTerm): @@ -501,7 +505,7 @@ def _assert_all_loadable_terms_specialized_to(self, domain): # XXX: This function exists because we currently only specialize LoadableTerms # when running a Pipeline on a given domain. -def maybe_specialize(term, domain): +def maybe_specialize(term: Term, domain: Domain) -> Term: """Specialize a term if it's loadable.""" if isinstance(term, LoadableTerm): return term.specialize(domain) diff --git a/ziplime/pipeline/hooks/delegate.py b/ziplime/pipeline/hooks/delegate.py index 0301ab77b4..cc9fbfc79a 100644 --- a/ziplime/pipeline/hooks/delegate.py +++ b/ziplime/pipeline/hooks/delegate.py @@ -1,34 +1,7 @@ -from ziplime.utils.compat import ExitStack, contextmanager, wraps -from .iface import PipelineHooks, PIPELINE_HOOKS_CONTEXT_MANAGERS +from .iface import PipelineHooks from .no import NoHooks - -def delegating_hooks_method(method_name): - """Factory function for making DelegatingHooks methods.""" - if method_name in PIPELINE_HOOKS_CONTEXT_MANAGERS: - # Generate a contextmanager that enters the context of all child hooks. 
- @wraps(getattr(PipelineHooks, method_name)) - @contextmanager - def ctx(self, *args, **kwargs): - with ExitStack() as stack: - for hook in self._hooks: - sub_ctx = getattr(hook, method_name)(*args, **kwargs) - stack.enter_context(sub_ctx) - yield stack - - return ctx - else: - # Generate a method that calls methods of all child hooks. - @wraps(getattr(PipelineHooks, method_name)) - def method(self, *args, **kwargs): - for hook in self._hooks: - sub_method = getattr(hook, method_name) - sub_method(*args, **kwargs) - - return method - - class DelegatingHooks(PipelineHooks): """A PipelineHooks that delegates to one or more other hooks. @@ -52,15 +25,4 @@ def __new__(cls, hooks): self._hooks = hooks return self - # Implement all interface methods by delegating to corresponding methods on - # input hooks. - # locals().update( - # { - # name: delegating_hooks_method(name) - # # TODO: Expose this publicly on interface. - # for name in PipelineHooks._signatures - # } - # ) - -del delegating_hooks_method diff --git a/ziplime/pipeline/hooks/iface.py b/ziplime/pipeline/hooks/iface.py index cd9c2307db..a8e0206f36 100644 --- a/ziplime/pipeline/hooks/iface.py +++ b/ziplime/pipeline/hooks/iface.py @@ -1,6 +1,7 @@ +import datetime from ziplime.utils.compat import contextmanager as _contextmanager - - +from ziplime.pipeline.terms.term import Term +from ziplime.pipeline.pipeline import Pipeline # Keep track of which methods of PipelineHooks are contextmanagers. Used by # DelegatingHooks to properly delegate to sub-hooks. PIPELINE_HOOKS_CONTEXT_MANAGERS = set() @@ -34,7 +35,7 @@ class PipelineHooks: """ @contextmanager - def running_pipeline(self, pipeline, start_date, end_date): + def running_pipeline(self, pipeline: Pipeline, start_date: datetime.date, end_date: datetime.date): """ Contextmanager entered during execution of run_pipeline or run_chunked_pipeline. 
@@ -50,7 +51,7 @@ def running_pipeline(self, pipeline, start_date, end_date): """ @contextmanager - def computing_chunk(self, terms, start_date, end_date): + def computing_chunk(self, terms: list[Term], start_date: datetime.date, end_date: datetime.date): """ Contextmanager entered during execution of compute_chunk. diff --git a/ziplime/pipeline/hooks/progress.py b/ziplime/pipeline/hooks/progress.py index 1c878ff40f..4dfaaa3fd0 100644 --- a/ziplime/pipeline/hooks/progress.py +++ b/ziplime/pipeline/hooks/progress.py @@ -1,6 +1,6 @@ """Pipeline hooks for tracking and displaying progress. """ - +import datetime from collections import namedtuple import time @@ -8,6 +8,7 @@ from ziplime.utils.string_formatting import bulleted_list from .iface import PipelineHooks +from .. import Term class ProgressHooks(PipelineHooks): @@ -71,7 +72,7 @@ def running_pipeline(self, pipeline, start_date, end_date): self._reset_transient_state() @contextmanager - def computing_chunk(self, terms, start_date, end_date): + def computing_chunk(self, terms: list[Term], start_date: datetime.date, end_date: datetime.date): # Set up model on first compute_chunk call. 
if self._model is None: self._publisher = self._publisher_factory() @@ -81,31 +82,31 @@ def computing_chunk(self, terms, start_date, end_date): ) try: - self._model.start_chunk(terms, start_date, end_date) + self._model.start_chunk(terms=terms, start_date=start_date, end_date=end_date) self._publish() yield finally: - self._model.finish_chunk(terms, start_date, end_date) + self._model.finish_chunk(terms=terms, start_date=start_date, end_date=end_date) self._publish() @contextmanager - def loading_terms(self, terms): + def loading_terms(self, terms: list[Term]): try: - self._model.start_load_terms(terms) + self._model.start_load_terms(terms=terms) self._publish() yield finally: - self._model.finish_load_terms(terms) + self._model.finish_load_terms(terms=terms) self._publish() @contextmanager - def computing_term(self, term): + def computing_term(self, term: Term): try: - self._model.start_compute_term(term) + self._model.start_compute_term(term=term) self._publish() yield finally: - self._model.finish_compute_term(term) + self._model.finish_compute_term(term=term) self._publish() @@ -150,7 +151,7 @@ class ProgressModel: List of terms currently being loaded or computed. """ - def __init__(self, start_date, end_date): + def __init__(self, start_date: datetime.date, end_date: datetime.date): self._start_date = start_date self._end_date = end_date @@ -210,7 +211,7 @@ def current_work(self): return self._current_work # These methods form the interface for ProgressHooks. 
- def start_chunk(self, terms, start_date, end_date): + def start_chunk(self, terms: list[Term], start_date: datetime.date, end_date: datetime.date): days_since_start = (end_date - self._start_date).days + 1 self._current_chunk_size = days_since_start - self._days_completed self._current_chunk_bounds = (start_date, end_date) diff --git a/ziplime/pipeline/loaders/__init__.py b/ziplime/pipeline/loaders/__init__.py index aa07584239..41b9b64102 100644 --- a/ziplime/pipeline/loaders/__init__.py +++ b/ziplime/pipeline/loaders/__init__.py @@ -1,9 +1,7 @@ from .equity_pricing_loader import ( EquityPricingLoader, - USEquityPricingLoader, ) __all__ = [ "EquityPricingLoader", - "USEquityPricingLoader", ] diff --git a/ziplime/pipeline/loaders/frame.py b/ziplime/pipeline/loaders/data_frame_loader.py similarity index 98% rename from ziplime/pipeline/loaders/frame.py rename to ziplime/pipeline/loaders/data_frame_loader.py index 2cf4075968..275f663cb8 100644 --- a/ziplime/pipeline/loaders/frame.py +++ b/ziplime/pipeline/loaders/data_frame_loader.py @@ -10,7 +10,7 @@ from ziplime.lib.adjusted_array import AdjustedArray from ziplime.lib.adjustment import make_adjustment_from_labels from ziplime.utils.numpy_utils import as_column -from .base import PipelineLoader +from .pipeline_loader import PipelineLoader ADJUSTMENT_COLUMNS = pd.Index( [ @@ -139,7 +139,7 @@ def format_adjustments(self, dates, assets): ) return out - def load_adjusted_array(self, domain, columns, dates, sids, mask): + async def load_adjusted_array(self, domain, columns, dates, sids, mask): """Load data from our stored baseline.""" if len(columns) != 1: diff --git a/ziplime/pipeline/loaders/earnings_estimates.py b/ziplime/pipeline/loaders/earnings_estimates.py deleted file mode 100644 index 347ace823a..0000000000 --- a/ziplime/pipeline/loaders/earnings_estimates.py +++ /dev/null @@ -1,1505 +0,0 @@ -from abc import abstractmethod - -import numpy as np -import pandas as pd -from toolz import groupby - -from 
ziplime.lib.adjusted_array import AdjustedArray -from ziplime.lib.adjustment import ( - Datetime641DArrayOverwrite, - Datetime64Overwrite, - Float641DArrayOverwrite, - Float64Multiply, - Float64Overwrite, -) - -from ziplime.pipeline.common import ( - EVENT_DATE_FIELD_NAME, - FISCAL_QUARTER_FIELD_NAME, - FISCAL_YEAR_FIELD_NAME, - SID_FIELD_NAME, - TS_FIELD_NAME, -) -from ziplime.pipeline.loaders.base import PipelineLoader -from ziplime.utils.date_utils import make_utc_aware -from ziplime.utils.numpy_utils import datetime64ns_dtype, float64_dtype -from ziplime.pipeline.loaders.utils import ( - ffill_across_cols, - last_in_date_group, -) - -INVALID_NUM_QTRS_MESSAGE = ( - "Passed invalid number of quarters %s; " "must pass a number of quarters >= 0" -) -NEXT_FISCAL_QUARTER = "next_fiscal_quarter" -NEXT_FISCAL_YEAR = "next_fiscal_year" -NORMALIZED_QUARTERS = "normalized_quarters" -PREVIOUS_FISCAL_QUARTER = "previous_fiscal_quarter" -PREVIOUS_FISCAL_YEAR = "previous_fiscal_year" -SHIFTED_NORMALIZED_QTRS = "shifted_normalized_quarters" -SIMULATION_DATES = "dates" - - -def normalize_quarters(years, quarters): - return years * 4 + quarters - 1 - - -def split_normalized_quarters(normalized_quarters): - years = normalized_quarters // 4 - quarters = normalized_quarters % 4 - return years, quarters + 1 - - -# These metadata columns are used to align event indexers. -metadata_columns = frozenset( - { - TS_FIELD_NAME, - SID_FIELD_NAME, - EVENT_DATE_FIELD_NAME, - FISCAL_QUARTER_FIELD_NAME, - FISCAL_YEAR_FIELD_NAME, - } -) - - -def required_estimates_fields(columns): - """Compute the set of resource columns required to serve - `columns`. - """ - # We also expect any of the field names that our loadable columns - # are mapped to. - return metadata_columns.union(columns.values()) - - -def validate_column_specs(events, columns): - """Verify that the columns of ``events`` can be used by a - EarningsEstimatesLoader to serve the BoundColumns described by - `columns`. 
- """ - required = required_estimates_fields(columns) - received = set(events.columns) - missing = required - received - if missing: - raise ValueError( - "EarningsEstimatesLoader missing required columns {missing}.\n" - "Got Columns: {received}\n" - "Expected Columns: {required}".format( - missing=sorted(missing), - received=sorted(received), - required=sorted(required), - ) - ) - - -def add_new_adjustments(adjustments_dict, adjustments, column_name, ts): - try: - adjustments_dict[column_name][ts].extend(adjustments) - except KeyError: - adjustments_dict[column_name][ts] = adjustments - - -class EarningsEstimatesLoader(PipelineLoader): - """An abstract pipeline loader for estimates data that can load data a - variable number of quarters forwards/backwards from calendar dates - depending on the `num_announcements` attribute of the columns' dataset. - If split adjustments are to be applied, a loader, split-adjusted columns, - and the split-adjusted asof-date must be supplied. - - Parameters - ---------- - estimates : pd.DataFrame - The raw estimates data; must contain at least 5 columns: - sid : int64 - The asset id associated with each estimate. - - event_date : datetime64[ns] - The date on which the event that the estimate is for will/has - occurred. - - timestamp : datetime64[ns] - The datetime where we learned about the estimate. - - fiscal_quarter : int64 - The quarter during which the event has/will occur. - - fiscal_year : int64 - The year during which the event has/will occur. - - name_map : dict[str -> str] - A map of names of BoundColumns that this loader will load to the - names of the corresponding columns in `events`. 
- """ - - def __init__(self, estimates, name_map): - validate_column_specs(estimates, name_map) - - self.estimates = estimates[ - estimates[EVENT_DATE_FIELD_NAME].notnull() - & estimates[FISCAL_QUARTER_FIELD_NAME].notnull() - & estimates[FISCAL_YEAR_FIELD_NAME].notnull() - ] - self.estimates[NORMALIZED_QUARTERS] = normalize_quarters( - self.estimates[FISCAL_YEAR_FIELD_NAME], - self.estimates[FISCAL_QUARTER_FIELD_NAME], - ) - - self.array_overwrites_dict = { - datetime64ns_dtype: Datetime641DArrayOverwrite, - float64_dtype: Float641DArrayOverwrite, - } - self.scalar_overwrites_dict = { - datetime64ns_dtype: Datetime64Overwrite, - float64_dtype: Float64Overwrite, - } - - self.name_map = name_map - - @abstractmethod - def get_zeroth_quarter_idx(self, stacked_last_per_qtr): - raise NotImplementedError("get_zeroth_quarter_idx") - - @abstractmethod - def get_shifted_qtrs(self, zero_qtrs, num_announcements): - raise NotImplementedError("get_shifted_qtrs") - - @abstractmethod - def create_overwrite_for_estimate( - self, - column, - column_name, - last_per_qtr, - next_qtr_start_idx, - requested_quarter, - sid, - sid_idx, - col_to_split_adjustments, - split_adjusted_asof_idx, - ): - raise NotImplementedError("create_overwrite_for_estimate") - - @property - @abstractmethod - def searchsorted_side(self): - return NotImplementedError("searchsorted_side") - - def get_requested_quarter_data( - self, - zero_qtr_data, - zeroth_quarter_idx, - stacked_last_per_qtr, - num_announcements, - dates, - ): - """Selects the requested data for each date. - - Parameters - ---------- - zero_qtr_data : pd.DataFrame - The 'time zero' data for each calendar date per sid. - zeroth_quarter_idx : pd.Index - An index of calendar dates, sid, and normalized quarters, for only - the rows that have a next or previous earnings estimate. - stacked_last_per_qtr : pd.DataFrame - The latest estimate known with the dates, normalized quarter, and - sid as the index. 
- num_announcements : int - The number of annoucements out the user requested relative to - each date in the calendar dates. - dates : pd.DatetimeIndex - The calendar dates for which estimates data is requested. - - Returns - -------- - requested_qtr_data : pd.DataFrame - The DataFrame with the latest values for the requested quarter - for all columns; `dates` are the index and columns are a MultiIndex - with sids at the top level and the dataset columns on the bottom. - """ - zero_qtr_data_idx = zero_qtr_data.index - requested_qtr_idx = pd.MultiIndex.from_arrays( - [ - zero_qtr_data_idx.get_level_values(0), - zero_qtr_data_idx.get_level_values(1), - self.get_shifted_qtrs( - zeroth_quarter_idx.get_level_values( - NORMALIZED_QUARTERS, - ), - num_announcements, - ), - ], - names=[ - zero_qtr_data_idx.names[0], - zero_qtr_data_idx.names[1], - SHIFTED_NORMALIZED_QTRS, - ], - ) - - requested_qtr_data = stacked_last_per_qtr.reindex(index=requested_qtr_idx) - requested_qtr_data = requested_qtr_data.reset_index( - SHIFTED_NORMALIZED_QTRS, - ) - # Calculate the actual year/quarter being requested and add those in - # as columns. - ( - requested_qtr_data[FISCAL_YEAR_FIELD_NAME], - requested_qtr_data[FISCAL_QUARTER_FIELD_NAME], - ) = split_normalized_quarters(requested_qtr_data[SHIFTED_NORMALIZED_QTRS]) - # Once we're left with just dates as the index, we can reindex by all - # dates so that we have a value for each calendar date. - return requested_qtr_data.unstack(SID_FIELD_NAME).reindex(dates) - - def get_split_adjusted_asof_idx(self, dates): - """Compute the index in `dates` where the split-adjusted-asof-date - falls. This is the date up to which, and including which, we will - need to unapply all adjustments for and then re-apply them as they - come in. After this date, adjustments are applied as normal. - - Parameters - ---------- - dates : pd.DatetimeIndex - The calendar dates over which the Pipeline is being computed. 
- - Returns - ------- - split_adjusted_asof_idx : int - The index in `dates` at which the data should be split. - """ - split_adjusted_asof_idx = dates.searchsorted(self._split_adjusted_asof) - # make_utc_aware(pd.DatetimeIndex(self._split_adjusted_asof)) - # The split-asof date is after the date index. - if split_adjusted_asof_idx == len(dates): - split_adjusted_asof_idx = len(dates) - 1 - if self._split_adjusted_asof.tzinfo is not None: - if self._split_adjusted_asof < dates[0]: - split_adjusted_asof_idx = -1 - else: - if self._split_adjusted_asof < dates[0]: - split_adjusted_asof_idx = -1 - return split_adjusted_asof_idx - - def collect_overwrites_for_sid( - self, - group, - dates, - requested_qtr_data, - last_per_qtr, - sid_idx, - columns, - all_adjustments_for_sid, - sid, - ): - """Given a sid, collect all overwrites that should be applied for this - sid at each quarter boundary. - - Parameters - ---------- - group : pd.DataFrame - The data for `sid`. - dates : pd.DatetimeIndex - The calendar dates for which estimates data is requested. - requested_qtr_data : pd.DataFrame - The DataFrame with the latest values for the requested quarter - for all columns. - last_per_qtr : pd.DataFrame - A DataFrame with a column MultiIndex of [self.estimates.columns, - normalized_quarters, sid] that allows easily getting the timeline - of estimates for a particular sid for a particular quarter. - sid_idx : int - The sid's index in the asset index. - columns : list of BoundColumn - The columns for which the overwrites should be computed. - all_adjustments_for_sid : dict[int -> AdjustedArray] - A dictionary of the integer index of each timestamp into the date - index, mapped to adjustments that should be applied at that - index for the given sid (`sid`). This dictionary is modified as - adjustments are collected. - sid : int - The sid for which overwrites should be computed. 
- """ - # If data was requested for only 1 date, there can never be any - # overwrites, so skip the extra work. - if len(dates) == 1: - return - - next_qtr_start_indices = dates.searchsorted( - pd.DatetimeIndex(group[EVENT_DATE_FIELD_NAME]), - side=self.searchsorted_side, - ) - - qtrs_with_estimates = group.index.get_level_values(NORMALIZED_QUARTERS).values - for idx in next_qtr_start_indices: - if 0 < idx < len(dates): - # Find the quarter being requested in the quarter we're - # crossing into. - requested_quarter = requested_qtr_data[ - SHIFTED_NORMALIZED_QTRS, - sid, - ].iloc[idx] - # Only add adjustments if the next quarter starts somewhere - # in our date index for this sid. Our 'next' quarter can - # never start at index 0; a starting index of 0 means that - # the next quarter's event date was NaT. - self.create_overwrites_for_quarter( - all_adjustments_for_sid, - idx, - last_per_qtr, - qtrs_with_estimates, - requested_quarter, - sid, - sid_idx, - columns, - ) - - def get_adjustments_for_sid( - self, - group, - dates, - requested_qtr_data, - last_per_qtr, - sid_to_idx, - columns, - col_to_all_adjustments, - **kwargs, - ): - """ - - Parameters - ---------- - group : pd.DataFrame - The data for the given sid. - dates : pd.DatetimeIndex - The calendar dates for which estimates data is requested. - requested_qtr_data : pd.DataFrame - The DataFrame with the latest values for the requested quarter - for all columns. - last_per_qtr : pd.DataFrame - A DataFrame with a column MultiIndex of [self.estimates.columns, - normalized_quarters, sid] that allows easily getting the timeline - of estimates for a particular sid for a particular quarter. - sid_to_idx : dict[int -> int] - A dictionary mapping sid to he sid's index in the asset index. - columns : list of BoundColumn - The columns for which the overwrites should be computed. 
- col_to_all_adjustments : dict[int -> AdjustedArray] - A dictionary of the integer index of each timestamp into the date - index, mapped to adjustments that should be applied at that - index. This dictionary is for adjustments for ALL sids. It is - modified as adjustments are collected. - kwargs : - Additional arguments used in collecting adjustments; unused here. - """ - # Collect all adjustments for a given sid. - all_adjustments_for_sid = {} - sid = int(group.name) - self.collect_overwrites_for_sid( - group, - dates, - requested_qtr_data, - last_per_qtr, - sid_to_idx[sid], - columns, - all_adjustments_for_sid, - sid, - ) - self.merge_into_adjustments_for_all_sids( - all_adjustments_for_sid, col_to_all_adjustments - ) - - def merge_into_adjustments_for_all_sids( - self, all_adjustments_for_sid, col_to_all_adjustments - ): - """Merge adjustments for a particular sid into a dictionary containing - adjustments for all sids. - - Parameters - ---------- - all_adjustments_for_sid : dict[int -> AdjustedArray] - All adjustments for a particular sid. - col_to_all_adjustments : dict[int -> AdjustedArray] - All adjustments for all sids. - """ - - for col_name in all_adjustments_for_sid: - if col_name not in col_to_all_adjustments: - col_to_all_adjustments[col_name] = {} - for ts in all_adjustments_for_sid[col_name]: - adjs = all_adjustments_for_sid[col_name][ts] - add_new_adjustments(col_to_all_adjustments, adjs, col_name, ts) - - def get_adjustments( - self, - zero_qtr_data, - requested_qtr_data, - last_per_qtr, - dates, - assets, - columns, - **kwargs, - ): - """Creates an AdjustedArray from the given estimates data for the given - dates. - - Parameters - ---------- - zero_qtr_data : pd.DataFrame - The 'time zero' data for each calendar date per sid. - requested_qtr_data : pd.DataFrame - The requested quarter data for each calendar date per sid. 
- last_per_qtr : pd.DataFrame - A DataFrame with a column MultiIndex of [self.estimates.columns, - normalized_quarters, sid] that allows easily getting the timeline - of estimates for a particular sid for a particular quarter. - dates : pd.DatetimeIndex - The calendar dates for which estimates data is requested. - assets : pd.Int64Index - An index of all the assets from the raw data. - columns : list of BoundColumn - The columns for which adjustments need to be calculated. - kwargs : - Additional keyword arguments that should be forwarded to - `get_adjustments_for_sid` and to be used in computing adjustments - for each sid. - - Returns - ------- - col_to_all_adjustments : dict[int -> AdjustedArray] - A dictionary of all adjustments that should be applied. - """ - - zero_qtr_data.sort_index(inplace=True) - # Here we want to get the LAST record from each group of records - # corresponding to a single quarter. This is to ensure that we select - # the most up-to-date event date in case the event date changes. - quarter_shifts = zero_qtr_data.groupby( - level=[SID_FIELD_NAME, NORMALIZED_QUARTERS] - ).nth(-1) - - col_to_all_adjustments = {} - sid_to_idx = dict(zip(assets, range(len(assets)))) - quarter_shifts.groupby(level=SID_FIELD_NAME).apply( - self.get_adjustments_for_sid, - dates, - requested_qtr_data, - last_per_qtr, - sid_to_idx, - columns, - col_to_all_adjustments, - **kwargs, - ) - return col_to_all_adjustments - - def create_overwrites_for_quarter( - self, - col_to_overwrites, - next_qtr_start_idx, - last_per_qtr, - quarters_with_estimates_for_sid, - requested_quarter, - sid, - sid_idx, - columns, - ): - """Add entries to the dictionary of columns to adjustments for the given - sid and the given quarter. - - Parameters - ---------- - col_to_overwrites : dict [column_name -> list of ArrayAdjustment] - A dictionary mapping column names to all overwrites for those - columns. 
- next_qtr_start_idx : int - The index of the first day of the next quarter in the calendar - dates. - last_per_qtr : pd.DataFrame - A DataFrame with a column MultiIndex of [self.estimates.columns, - normalized_quarters, sid] that allows easily getting the timeline - of estimates for a particular sid for a particular quarter; this - is particularly useful for getting adjustments for 'next' - estimates. - quarters_with_estimates_for_sid : np.array - An array of all quarters for which there are estimates for the - given sid. - requested_quarter : float - The quarter for which the overwrite should be created. - sid : int - The sid for which to create overwrites. - sid_idx : int - The index of the sid in `assets`. - columns : list of BoundColumn - The columns for which to create overwrites. - """ - for col in columns: - column_name = self.name_map[col.name] - if column_name not in col_to_overwrites: - col_to_overwrites[column_name] = {} - # If there are estimates for the requested quarter, - # overwrite all values going up to the starting index of - # that quarter with estimates for that quarter. - if requested_quarter in quarters_with_estimates_for_sid: - adjs = self.create_overwrite_for_estimate( - col, - column_name, - last_per_qtr, - next_qtr_start_idx, - requested_quarter, - sid, - sid_idx, - ) - add_new_adjustments( - col_to_overwrites, adjs, column_name, next_qtr_start_idx - ) - # There are no estimates for the quarter. Overwrite all - # values going up to the starting index of that quarter - # with the missing value for this column. 
- else: - adjs = [self.overwrite_with_null(col, next_qtr_start_idx, sid_idx)] - add_new_adjustments( - col_to_overwrites, adjs, column_name, next_qtr_start_idx - ) - - def overwrite_with_null(self, column, next_qtr_start_idx, sid_idx): - return self.scalar_overwrites_dict[column.dtype]( - 0, next_qtr_start_idx - 1, sid_idx, sid_idx, column.missing_value - ) - - def load_adjusted_array(self, domain, columns, dates, sids, mask): - # Separate out getting the columns' datasets and the datasets' - # num_announcements attributes to ensure that we're catching the right - # AttributeError. - col_to_datasets = {col: col.dataset for col in columns} - try: - groups = groupby( - lambda col: col_to_datasets[col].num_announcements, col_to_datasets - ) - except AttributeError as exc: - raise AttributeError( - "Datasets loaded via the " - "EarningsEstimatesLoader must define a " - "`num_announcements` attribute that defines " - "how many quarters out the loader should load" - " the data relative to `dates`." - ) from exc - if any(num_qtr < 0 for num_qtr in groups): - raise ValueError( - INVALID_NUM_QTRS_MESSAGE - % ",".join(str(qtr) for qtr in groups if qtr < 0) - ) - out = {} - # To optimize performance, only work below on assets that are - # actually in the raw data. - data_query_cutoff_times = domain.data_query_cutoff_for_sessions(dates) - assets_with_data = set(sids) & set(self.estimates[SID_FIELD_NAME]) - last_per_qtr, stacked_last_per_qtr = self.get_last_data_per_qtr( - assets_with_data, - columns, - dates, - data_query_cutoff_times, - ) - # Determine which quarter is immediately next/previous for each - # date. 
- zeroth_quarter_idx = self.get_zeroth_quarter_idx(stacked_last_per_qtr) - zero_qtr_data = stacked_last_per_qtr.loc[zeroth_quarter_idx] - - for num_announcements, columns in groups.items(): - requested_qtr_data = self.get_requested_quarter_data( - zero_qtr_data, - zeroth_quarter_idx, - stacked_last_per_qtr, - num_announcements, - dates, - ) - - # Calculate all adjustments for the given quarter and accumulate - # them for each column. - col_to_adjustments = self.get_adjustments( - zero_qtr_data, requested_qtr_data, last_per_qtr, dates, sids, columns - ) - - # Lookup the asset indexer once, this is so we can reindex - # the assets returned into the assets requested for each column. - # This depends on the fact that our column pd.MultiIndex has the same - # sids for each field. This allows us to do the lookup once on - # level 1 instead of doing the lookup each time per value in - # level 0. - # asset_indexer = sids.get_indexer_for( - # requested_qtr_data.columns.levels[1], - # ) - for col in columns: - column_name = self.name_map[col.name] - # allocate the empty output with the correct missing value - # shape = len(dates), len(sids) - # output_array = np.full(shape=shape, - # fill_value=col.missing_value, - # dtype=col.dtype) - # overwrite the missing value with values from the computed data - try: - output_array = ( - requested_qtr_data[column_name] - .reindex(sids, axis=1) - .to_numpy() - .astype(col.dtype) - ) - except Exception: - output_array = ( - requested_qtr_data[column_name] - .reindex(sids, axis=1) - .to_numpy(na_value=col.missing_value) - .astype(col.dtype) - ) - - # except ValueError: - # np.copyto(output_array[:, asset_indexer], - # requested_qtr_data[column_name].to_numpy(na_value=output_array.dtype), - # casting='unsafe') - out[col] = AdjustedArray( - output_array, - # There may not be any adjustments at all (e.g. if - # len(date) == 1), so provide a default. 
- dict(col_to_adjustments.get(column_name, {})), - col.missing_value, - ) - return out - - def get_last_data_per_qtr( - self, assets_with_data, columns, dates, data_query_cutoff_times - ): - """Determine the last piece of information we know for each column on each - date in the index for each sid and quarter. - - Parameters - ---------- - assets_with_data : pd.Index - Index of all assets that appear in the raw data given to the - loader. - columns : iterable of BoundColumn - The columns that need to be loaded from the raw data. - data_query_cutoff_times : pd.DatetimeIndex - The calendar of dates for which data should be loaded. - - Returns - ------- - stacked_last_per_qtr : pd.DataFrame - A DataFrame indexed by [dates, sid, normalized_quarters] that has - the latest information for each row of the index, sorted by event - date. - last_per_qtr : pd.DataFrame - A DataFrame with columns that are a MultiIndex of [ - self.estimates.columns, normalized_quarters, sid]. - """ - # Get a DataFrame indexed by date with a MultiIndex of columns of - # [self.estimates.columns, normalized_quarters, sid], where each cell - # contains the latest data for that day. - last_per_qtr = last_in_date_group( - self.estimates, - data_query_cutoff_times, - assets_with_data, - reindex=True, - extra_groupers=[NORMALIZED_QUARTERS], - ) - last_per_qtr.index = dates - # Forward fill values for each quarter/sid/dataset column. - ffill_across_cols(last_per_qtr, columns, self.name_map) - # Stack quarter and sid into the index. 
- stacked_last_per_qtr = last_per_qtr.stack( - [SID_FIELD_NAME, NORMALIZED_QUARTERS], - ) - # Set date index name for ease of reference - stacked_last_per_qtr.index.set_names( - SIMULATION_DATES, - level=0, - inplace=True, - ) - stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] = pd.to_datetime( - stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] - ) - stacked_last_per_qtr = stacked_last_per_qtr.sort_values(EVENT_DATE_FIELD_NAME) - return last_per_qtr, stacked_last_per_qtr - - -class NextEarningsEstimatesLoader(EarningsEstimatesLoader): - searchsorted_side = "right" - - def create_overwrite_for_estimate( - self, - column, - column_name, - last_per_qtr, - next_qtr_start_idx, - requested_quarter, - sid, - sid_idx, - col_to_split_adjustments=None, - split_adjusted_asof_idx=None, - ): - return [ - self.array_overwrites_dict[column.dtype]( - 0, - next_qtr_start_idx - 1, - sid_idx, - sid_idx, - last_per_qtr[ - column_name, - requested_quarter, - sid, - ].values[:next_qtr_start_idx], - ) - ] - - def get_shifted_qtrs(self, zero_qtrs, num_announcements): - return zero_qtrs + (num_announcements - 1) - - def get_zeroth_quarter_idx(self, stacked_last_per_qtr): - """Filters for releases that are on or after each simulation date and - determines the next quarter by picking out the upcoming release for - each date in the index. - - Parameters - ---------- - stacked_last_per_qtr : pd.DataFrame - A DataFrame with index of calendar dates, sid, and normalized - quarters with each row being the latest estimate for the row's - index values, sorted by event date. - - Returns - ------- - next_releases_per_date_index : pd.MultiIndex - An index of calendar dates, sid, and normalized quarters, for only - the rows that have a next event. 
- """ - next_releases_per_date = ( - stacked_last_per_qtr.loc[ - stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] - >= stacked_last_per_qtr.index.get_level_values(SIMULATION_DATES) - ] - .groupby( - level=[SIMULATION_DATES, SID_FIELD_NAME], - as_index=False, - # Here we take advantage of the fact that `stacked_last_per_qtr` is - # sorted by event date. - ) - .nth(0) - ) - return next_releases_per_date.index - - -class PreviousEarningsEstimatesLoader(EarningsEstimatesLoader): - searchsorted_side = "left" - - def create_overwrite_for_estimate( - self, - column, - column_name, - dates, - next_qtr_start_idx, - requested_quarter, - sid, - sid_idx, - col_to_split_adjustments=None, - split_adjusted_asof_idx=None, - split_dict=None, - ): - return [ - self.overwrite_with_null( - column, - next_qtr_start_idx, - sid_idx, - ) - ] - - def get_shifted_qtrs(self, zero_qtrs, num_announcements): - return zero_qtrs - (num_announcements - 1) - - def get_zeroth_quarter_idx(self, stacked_last_per_qtr): - """Filters for releases that are on or after each simulation date and - determines the previous quarter by picking out the most recent - release relative to each date in the index. - - Parameters - ---------- - stacked_last_per_qtr : pd.DataFrame - A DataFrame with index of calendar dates, sid, and normalized - quarters with each row being the latest estimate for the row's - index values, sorted by event date. - - Returns - ------- - previous_releases_per_date_index : pd.MultiIndex - An index of calendar dates, sid, and normalized quarters, for only - the rows that have a previous event. - """ - previous_releases_per_date = ( - stacked_last_per_qtr.loc[ - stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] - <= stacked_last_per_qtr.index.get_level_values(SIMULATION_DATES) - ] - .groupby( - level=[SIMULATION_DATES, SID_FIELD_NAME], - as_index=False, - # Here we take advantage of the fact that `stacked_last_per_qtr` is - # sorted by event date. 
- ) - .nth(-1) - ) - return previous_releases_per_date.index - - -def validate_split_adjusted_column_specs(name_map, columns): - to_be_split = set(columns) - available = set(name_map.keys()) - extra = to_be_split - available - if extra: - raise ValueError( - "EarningsEstimatesLoader got the following extra columns to be " - "split-adjusted: {extra}.\n" - "Got Columns: {to_be_split}\n" - "Available Columns: {available}".format( - extra=sorted(extra), - to_be_split=sorted(to_be_split), - available=sorted(available), - ) - ) - - -class SplitAdjustedEstimatesLoader(EarningsEstimatesLoader): - """Estimates loader that loads data that needs to be split-adjusted. - - Parameters - ---------- - split_adjustments_loader : SQLiteAdjustmentReader - The loader to use for reading split adjustments. - split_adjusted_column_names : iterable of str - The column names that should be split-adjusted. - split_adjusted_asof : pd.Timestamp - The date that separates data into 2 halves: the first half is the set - of dates up to and including the split_adjusted_asof date. All - adjustments occurring during this first half are applied to all - dates in this first half. The second half is the set of dates after - the split_adjusted_asof date. All adjustments occurring during this - second half are applied sequentially as they appear in the timeline. 
- """ - - def __init__( - self, - estimates, - name_map, - split_adjustments_loader, - split_adjusted_column_names, - split_adjusted_asof, - ): - validate_split_adjusted_column_specs(name_map, split_adjusted_column_names) - self._split_adjustments = split_adjustments_loader - self._split_adjusted_column_names = split_adjusted_column_names - self._split_adjusted_asof = split_adjusted_asof - self._split_adjustment_dict = {} - super(SplitAdjustedEstimatesLoader, self).__init__(estimates, name_map) - - @abstractmethod - def collect_split_adjustments( - self, - adjustments_for_sid, - requested_qtr_data, - dates, - sid, - sid_idx, - sid_estimates, - split_adjusted_asof_idx, - pre_adjustments, - post_adjustments, - requested_split_adjusted_columns, - ): - raise NotImplementedError("collect_split_adjustments") - - def get_adjustments_for_sid( - self, - group, - dates, - requested_qtr_data, - last_per_qtr, - sid_to_idx, - columns, - col_to_all_adjustments, - split_adjusted_asof_idx=None, - split_adjusted_cols_for_group=None, - ): - """Collects both overwrites and adjustments for a particular sid. - - Parameters - ---------- - split_adjusted_asof_idx : int - The integer index of the date on which the data was split-adjusted. - split_adjusted_cols_for_group : list of str - The names of requested columns that should also be split-adjusted. - """ - all_adjustments_for_sid = {} - sid = int(group.name) - self.collect_overwrites_for_sid( - group, - dates, - requested_qtr_data, - last_per_qtr, - sid_to_idx[sid], - columns, - all_adjustments_for_sid, - sid, - ) - ( - pre_adjustments, - post_adjustments, - ) = self.retrieve_split_adjustment_data_for_sid( - dates, sid, split_adjusted_asof_idx - ) - sid_estimates = self.estimates[self.estimates[SID_FIELD_NAME] == sid] - # We might not have any overwrites but still have - # adjustments, and we will need to manually add columns if - # that is the case. 
- for col_name in split_adjusted_cols_for_group: - if col_name not in all_adjustments_for_sid: - all_adjustments_for_sid[col_name] = {} - - self.collect_split_adjustments( - all_adjustments_for_sid, - requested_qtr_data, - dates, - sid, - sid_to_idx[sid], - sid_estimates, - split_adjusted_asof_idx, - pre_adjustments, - post_adjustments, - split_adjusted_cols_for_group, - ) - self.merge_into_adjustments_for_all_sids( - all_adjustments_for_sid, col_to_all_adjustments - ) - - def get_adjustments( - self, - zero_qtr_data, - requested_qtr_data, - last_per_qtr, - dates, - assets, - columns, - **kwargs, - ): - """Calculates both split adjustments and overwrites for all sids.""" - split_adjusted_cols_for_group = [ - self.name_map[col.name] - for col in columns - if self.name_map[col.name] in self._split_adjusted_column_names - ] - # Add all splits to the adjustment dict for this sid. - split_adjusted_asof_idx = self.get_split_adjusted_asof_idx(dates) - return super(SplitAdjustedEstimatesLoader, self).get_adjustments( - zero_qtr_data, - requested_qtr_data, - last_per_qtr, - dates, - assets, - columns, - split_adjusted_cols_for_group=split_adjusted_cols_for_group, - split_adjusted_asof_idx=split_adjusted_asof_idx, - ) - - def determine_end_idx_for_adjustment( - self, adjustment_ts, dates, upper_bound, requested_quarter, sid_estimates - ): - """Determines the date until which the adjustment at the given date - index should be applied for the given quarter. - - Parameters - ---------- - adjustment_ts : pd.Timestamp - The timestamp at which the adjustment occurs. - dates : pd.DatetimeIndex - The calendar dates over which the Pipeline is being computed. - upper_bound : int - The index of the upper bound in the calendar dates. This is the - index until which the adjusment will be applied unless there is - information for the requested quarter that comes in on or before - that date. 
- requested_quarter : float - The quarter for which we are determining how the adjustment - should be applied. - sid_estimates : pd.DataFrame - The DataFrame of estimates data for the sid for which we're - applying the given adjustment. - - Returns - ------- - end_idx : int - The last index to which the adjustment should be applied for the - given quarter/sid. - """ - end_idx = upper_bound - # Find the next newest kd that happens on or after - # the date of this adjustment - newest_kd_for_qtr = sid_estimates[ - (sid_estimates[NORMALIZED_QUARTERS] == requested_quarter) - & (sid_estimates[TS_FIELD_NAME] >= adjustment_ts) - ][TS_FIELD_NAME].min() - if pd.notnull(newest_kd_for_qtr): - newest_kd_idx = dates.searchsorted(newest_kd_for_qtr) - # make_utc_aware(pd.DatetimeIndex(newest_kd_for_qtr)) - # We have fresh information that comes in - # before the end of the overwrite and - # presumably is already split-adjusted to the - # current split. We should stop applying the - # adjustment the day before this new - # information comes in. - if newest_kd_idx <= upper_bound: - end_idx = newest_kd_idx - 1 - return end_idx - - def collect_pre_split_asof_date_adjustments( - self, - split_adjusted_asof_date_idx, - sid_idx, - pre_adjustments, - requested_split_adjusted_columns, - ): - """Collect split adjustments that occur before the - split-adjusted-asof-date. All those adjustments must first be - UN-applied at the first date index and then re-applied on the - appropriate dates in order to match point in time share pricing data. - - Parameters - ---------- - split_adjusted_asof_date_idx : int - The index in the calendar dates as-of which all data was - split-adjusted. - sid_idx : int - The index of the sid for which adjustments should be collected in - the adjusted array. - pre_adjustments : tuple(list(float), list(int)) - The adjustment values, indexes in `dates`, and timestamps for - adjustments that happened after the split-asof-date. 
- requested_split_adjusted_columns : list of str - The requested split adjusted columns. - - Returns - ------- - col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]] - The adjustments for this sid that occurred on or before the - split-asof-date. - """ - col_to_split_adjustments = {} - if len(pre_adjustments[0]): - adjustment_values, date_indexes = pre_adjustments - for column_name in requested_split_adjusted_columns: - col_to_split_adjustments[column_name] = {} - # We need to undo all adjustments that happen before the - # split_asof_date here by reversing the split ratio. - col_to_split_adjustments[column_name][0] = [ - Float64Multiply( - 0, - split_adjusted_asof_date_idx, - sid_idx, - sid_idx, - 1 / future_adjustment, - ) - for future_adjustment in adjustment_values - ] - - for adjustment, date_index in zip(adjustment_values, date_indexes): - adj = Float64Multiply( - 0, split_adjusted_asof_date_idx, sid_idx, sid_idx, adjustment - ) - add_new_adjustments( - col_to_split_adjustments, [adj], column_name, date_index - ) - - return col_to_split_adjustments - - def collect_post_asof_split_adjustments( - self, - post_adjustments, - requested_qtr_data, - sid, - sid_idx, - sid_estimates, - requested_split_adjusted_columns, - ): - """Collect split adjustments that occur after the - split-adjusted-asof-date. Each adjustment needs to be applied to all - dates on which knowledge for the requested quarter was older than the - date of the adjustment. - - Parameters - ---------- - post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) - The adjustment values, indexes in `dates`, and timestamps for - adjustments that happened after the split-asof-date. - requested_qtr_data : pd.DataFrame - The requested quarter data for each calendar date per sid. - sid : int - The sid for which adjustments need to be collected. - sid_idx : int - The index of `sid` in the adjusted array. - sid_estimates : pd.DataFrame - The raw estimates data for this sid. 
- requested_split_adjusted_columns : list of str - The requested split adjusted columns. - Returns - ------- - col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]] - The adjustments for this sid that occurred after the - split-asof-date. - """ - col_to_split_adjustments = {} - if post_adjustments: - # Get an integer index - requested_qtr_timeline = requested_qtr_data[SHIFTED_NORMALIZED_QTRS][ - sid - ].reset_index() - requested_qtr_timeline = requested_qtr_timeline[ - requested_qtr_timeline[sid].notnull() - ] - - # Split the data into range by quarter and determine which quarter - # was being requested in each range. - # Split integer indexes up by quarter range - qtr_ranges_idxs = np.split( - requested_qtr_timeline.index, - np.where(np.diff(requested_qtr_timeline[sid]) != 0)[0] + 1, - ) - requested_quarters_per_range = [ - requested_qtr_timeline[sid][r[0]] for r in qtr_ranges_idxs - ] - # Try to apply each adjustment to each quarter range. - for i, qtr_range in enumerate(qtr_ranges_idxs): - for adjustment, date_index, timestamp in zip(*post_adjustments): - # In the default case, apply through the end of the quarter - upper_bound = qtr_range[-1] - # Find the smallest KD in estimates that is on or after the - # date of the given adjustment. Apply the given adjustment - # until that KD. - end_idx = self.determine_end_idx_for_adjustment( - timestamp, - requested_qtr_data.index, - upper_bound, - requested_quarters_per_range[i], - sid_estimates, - ) - # In the default case, apply adjustment on the first day of - # the quarter. - start_idx = qtr_range[0] - # If the adjustment happens during this quarter, apply the - # adjustment on the day it happens. - if date_index > start_idx: - start_idx = date_index - # We only want to apply the adjustment if we have any stale - # data to apply it to. 
- if qtr_range[0] <= end_idx: - for column_name in requested_split_adjusted_columns: - if column_name not in col_to_split_adjustments: - col_to_split_adjustments[column_name] = {} - adj = Float64Multiply( - # Always apply from first day of qtr - qtr_range[0], - end_idx, - sid_idx, - sid_idx, - adjustment, - ) - add_new_adjustments( - col_to_split_adjustments, [adj], column_name, start_idx - ) - - return col_to_split_adjustments - - def retrieve_split_adjustment_data_for_sid( - self, dates, sid, split_adjusted_asof_idx - ): - """ - - dates : pd.DatetimeIndex - The calendar dates. - sid : int - The sid for which we want to retrieve adjustments. - split_adjusted_asof_idx : int - The index in `dates` as-of which the data is split adjusted. - - Returns - ------- - pre_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) - The adjustment values and indexes in `dates` for - adjustments that happened before the split-asof-date. - post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) - The adjustment values, indexes in `dates`, and timestamps for - adjustments that happened after the split-asof-date. - """ - adjustments = self._split_adjustments.get_adjustments_for_sid("splits", sid) - sorted(adjustments, key=lambda adj: adj[0]) - # Get rid of any adjustments that happen outside of our date index. - adjustments = list(filter(lambda x: dates[0] <= x[0] <= dates[-1], adjustments)) - adjustment_values = np.array([adj[1] for adj in adjustments]) - timestamps = pd.DatetimeIndex([adj[0] for adj in adjustments]) - # We need the first date on which we would have known about each - # adjustment. 
- date_indexes = dates.searchsorted(timestamps) - pre_adjustment_idxs = np.where(date_indexes <= split_adjusted_asof_idx)[0] - last_adjustment_split_asof_idx = -1 - if len(pre_adjustment_idxs): - last_adjustment_split_asof_idx = pre_adjustment_idxs.max() - pre_adjustments = ( - adjustment_values[: last_adjustment_split_asof_idx + 1], - date_indexes[: last_adjustment_split_asof_idx + 1], - ) - post_adjustments = ( - adjustment_values[last_adjustment_split_asof_idx + 1 :], - date_indexes[last_adjustment_split_asof_idx + 1 :], - timestamps[last_adjustment_split_asof_idx + 1 :], - ) - return pre_adjustments, post_adjustments - - def _collect_adjustments( - self, - requested_qtr_data, - sid, - sid_idx, - sid_estimates, - split_adjusted_asof_idx, - pre_adjustments, - post_adjustments, - requested_split_adjusted_columns, - ): - pre_adjustments_dict = self.collect_pre_split_asof_date_adjustments( - split_adjusted_asof_idx, - sid_idx, - pre_adjustments, - requested_split_adjusted_columns, - ) - - post_adjustments_dict = self.collect_post_asof_split_adjustments( - post_adjustments, - requested_qtr_data, - sid, - sid_idx, - sid_estimates, - requested_split_adjusted_columns, - ) - return pre_adjustments_dict, post_adjustments_dict - - def merge_split_adjustments_with_overwrites( - self, pre, post, overwrites, requested_split_adjusted_columns - ): - """Merge split adjustments with the dict containing overwrites. - - Parameters - ---------- - pre : dict[str -> dict[int -> list]] - The adjustments that occur before the split-adjusted-asof-date. - post : dict[str -> dict[int -> list]] - The adjustments that occur after the split-adjusted-asof-date. - overwrites : dict[str -> dict[int -> list]] - The overwrites across all time. Adjustments will be merged into - this dictionary. - requested_split_adjusted_columns : list of str - List of names of split adjusted columns that are being requested. 
- """ - for column_name in requested_split_adjusted_columns: - # We can do a merge here because the timestamps in 'pre' and - # 'post' are guaranteed to not overlap. - if pre: - # Either empty or contains all columns. - for ts in pre[column_name]: - add_new_adjustments( - overwrites, pre[column_name][ts], column_name, ts - ) - if post: - # Either empty or contains all columns. - for ts in post[column_name]: - add_new_adjustments( - overwrites, post[column_name][ts], column_name, ts - ) - - -class PreviousSplitAdjustedEarningsEstimatesLoader( - SplitAdjustedEstimatesLoader, PreviousEarningsEstimatesLoader -): - def collect_split_adjustments( - self, - adjustments_for_sid, - requested_qtr_data, - dates, - sid, - sid_idx, - sid_estimates, - split_adjusted_asof_idx, - pre_adjustments, - post_adjustments, - requested_split_adjusted_columns, - ): - """Collect split adjustments for previous quarters and apply them to the - given dictionary of splits for the given sid. Since overwrites just - replace all estimates before the new quarter with NaN, we don't need to - worry about re-applying split adjustments. - - Parameters - ---------- - adjustments_for_sid : dict[str -> dict[int -> list]] - The dictionary of adjustments to which splits need to be added. - Initially it contains only overwrites. - requested_qtr_data : pd.DataFrame - The requested quarter data for each calendar date per sid. - dates : pd.DatetimeIndex - The calendar dates for which estimates data is requested. - sid : int - The sid for which adjustments need to be collected. - sid_idx : int - The index of `sid` in the adjusted array. - sid_estimates : pd.DataFrame - The raw estimates data for the given sid. - split_adjusted_asof_idx : int - The index in `dates` as-of which the data is split adjusted. - pre_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) - The adjustment values and indexes in `dates` for - adjustments that happened before the split-asof-date. 
- post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) - The adjustment values, indexes in `dates`, and timestamps for - adjustments that happened after the split-asof-date. - requested_split_adjusted_columns : list of str - List of requested split adjusted column names. - """ - (pre_adjustments_dict, post_adjustments_dict) = self._collect_adjustments( - requested_qtr_data, - sid, - sid_idx, - sid_estimates, - split_adjusted_asof_idx, - pre_adjustments, - post_adjustments, - requested_split_adjusted_columns, - ) - self.merge_split_adjustments_with_overwrites( - pre_adjustments_dict, - post_adjustments_dict, - adjustments_for_sid, - requested_split_adjusted_columns, - ) - - -class NextSplitAdjustedEarningsEstimatesLoader( - SplitAdjustedEstimatesLoader, NextEarningsEstimatesLoader -): - def collect_split_adjustments( - self, - adjustments_for_sid, - requested_qtr_data, - dates, - sid, - sid_idx, - sid_estimates, - split_adjusted_asof_idx, - pre_adjustments, - post_adjustments, - requested_split_adjusted_columns, - ): - """Collect split adjustments for future quarters. Re-apply adjustments - that would be overwritten by overwrites. Merge split adjustments with - overwrites into the given dictionary of splits for the given sid. - - Parameters - ---------- - adjustments_for_sid : dict[str -> dict[int -> list]] - The dictionary of adjustments to which splits need to be added. - Initially it contains only overwrites. - requested_qtr_data : pd.DataFrame - The requested quarter data for each calendar date per sid. - dates : pd.DatetimeIndex - The calendar dates for which estimates data is requested. - sid : int - The sid for which adjustments need to be collected. - sid_idx : int - The index of `sid` in the adjusted array. - sid_estimates : pd.DataFrame - The raw estimates data for the given sid. - split_adjusted_asof_idx : int - The index in `dates` as-of which the data is split adjusted. 
- pre_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) - The adjustment values and indexes in `dates` for - adjustments that happened before the split-asof-date. - post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) - The adjustment values, indexes in `dates`, and timestamps for - adjustments that happened after the split-asof-date. - requested_split_adjusted_columns : list of str - List of requested split adjusted column names. - """ - (pre_adjustments_dict, post_adjustments_dict) = self._collect_adjustments( - requested_qtr_data, - sid, - sid_idx, - sid_estimates, - split_adjusted_asof_idx, - pre_adjustments, - post_adjustments, - requested_split_adjusted_columns, - ) - for column_name in requested_split_adjusted_columns: - for overwrite_ts in adjustments_for_sid[column_name]: - # We need to cumulatively re-apply all adjustments up to the - # split-adjusted-asof-date. We might not have any - # pre-adjustments, so we should check for that. - if overwrite_ts <= split_adjusted_asof_idx and pre_adjustments_dict: - for split_ts in pre_adjustments_dict[column_name]: - # The split has to have occurred during the span of - # the overwrite. - if split_ts < overwrite_ts: - # Create new adjustments here so that we can - # re-apply all applicable adjustments to ONLY - # the dates being overwritten. - adjustments_for_sid[column_name][overwrite_ts].extend( - [ - Float64Multiply( - 0, - overwrite_ts - 1, - sid_idx, - sid_idx, - adjustment.value, - ) - for adjustment in pre_adjustments_dict[column_name][ - split_ts - ] - ] - ) - # After the split-adjusted-asof-date, we need to re-apply all - # adjustments that occur after that date and within the - # bounds of the overwrite. They need to be applied starting - # from the first date and until an end date. 
The end date is - # the date of the newest information we get about - # `requested_quarter` that is >= `split_ts`, or if there is no - # new knowledge before `overwrite_ts`, then it is the date - # before `overwrite_ts`. - else: - # Overwrites happen at the first index of a new quarter, - # so determine here which quarter that is. - requested_quarter = requested_qtr_data[ - SHIFTED_NORMALIZED_QTRS, sid - ].iloc[overwrite_ts] - - for adjustment_value, date_index, timestamp in zip( - *post_adjustments - ): - if split_adjusted_asof_idx < date_index < overwrite_ts: - # Assume the entire overwrite contains stale data - upper_bound = overwrite_ts - 1 - end_idx = self.determine_end_idx_for_adjustment( - timestamp, - dates, - upper_bound, - requested_quarter, - sid_estimates, - ) - adjustments_for_sid[column_name][overwrite_ts].append( - Float64Multiply( - 0, end_idx, sid_idx, sid_idx, adjustment_value - ) - ) - - self.merge_split_adjustments_with_overwrites( - pre_adjustments_dict, - post_adjustments_dict, - adjustments_for_sid, - requested_split_adjusted_columns, - ) diff --git a/ziplime/pipeline/loaders/earnings_estimates_loader.py b/ziplime/pipeline/loaders/earnings_estimates_loader.py new file mode 100644 index 0000000000..954b808bea --- /dev/null +++ b/ziplime/pipeline/loaders/earnings_estimates_loader.py @@ -0,0 +1,648 @@ +from abc import abstractmethod + +import pandas as pd +from toolz import groupby + +from ziplime.lib.adjusted_array import AdjustedArray +from ziplime.lib.adjustment import ( + Datetime641DArrayOverwrite, + Datetime64Overwrite, + Float641DArrayOverwrite, + Float64Overwrite, +) + +from ziplime.pipeline.common import ( + EVENT_DATE_FIELD_NAME, + FISCAL_QUARTER_FIELD_NAME, + FISCAL_YEAR_FIELD_NAME, + SID_FIELD_NAME, NORMALIZED_QUARTERS, SHIFTED_NORMALIZED_QTRS, INVALID_NUM_QTRS_MESSAGE, SIMULATION_DATES, +) +from ziplime.pipeline.loaders.pipeline_loader import PipelineLoader +from ziplime.utils.numpy_utils import datetime64ns_dtype, float64_dtype 
+from ziplime.pipeline.loaders.utils import ( + ffill_across_cols, + last_in_date_group, normalize_quarters, validate_column_specs, split_normalized_quarters, add_new_adjustments, +) + + + +class EarningsEstimatesLoader(PipelineLoader): + """An abstract pipeline loader for estimates data that can load data a + variable number of quarters forwards/backwards from calendar dates + depending on the `num_announcements` attribute of the columns' dataset. + If split adjustments are to be applied, a loader, split-adjusted columns, + and the split-adjusted asof-date must be supplied. + + Parameters + ---------- + estimates : pd.DataFrame + The raw estimates data; must contain at least 5 columns: + sid : int64 + The asset id associated with each estimate. + + event_date : datetime64[ns] + The date on which the event that the estimate is for will/has + occurred. + + timestamp : datetime64[ns] + The datetime where we learned about the estimate. + + fiscal_quarter : int64 + The quarter during which the event has/will occur. + + fiscal_year : int64 + The year during which the event has/will occur. + + name_map : dict[str -> str] + A map of names of BoundColumns that this loader will load to the + names of the corresponding columns in `events`. 
+ """ + + def __init__(self, estimates, name_map): + validate_column_specs(estimates, name_map) + + self.estimates = estimates[ + estimates[EVENT_DATE_FIELD_NAME].notnull() + & estimates[FISCAL_QUARTER_FIELD_NAME].notnull() + & estimates[FISCAL_YEAR_FIELD_NAME].notnull() + ] + self.estimates[NORMALIZED_QUARTERS] = normalize_quarters( + self.estimates[FISCAL_YEAR_FIELD_NAME], + self.estimates[FISCAL_QUARTER_FIELD_NAME], + ) + + self.array_overwrites_dict = { + datetime64ns_dtype: Datetime641DArrayOverwrite, + float64_dtype: Float641DArrayOverwrite, + } + self.scalar_overwrites_dict = { + datetime64ns_dtype: Datetime64Overwrite, + float64_dtype: Float64Overwrite, + } + + self.name_map = name_map + + @abstractmethod + def get_zeroth_quarter_idx(self, stacked_last_per_qtr): + raise NotImplementedError("get_zeroth_quarter_idx") + + @abstractmethod + def get_shifted_qtrs(self, zero_qtrs, num_announcements): + raise NotImplementedError("get_shifted_qtrs") + + @abstractmethod + def create_overwrite_for_estimate( + self, + column, + column_name, + last_per_qtr, + next_qtr_start_idx, + requested_quarter, + sid, + sid_idx, + col_to_split_adjustments, + split_adjusted_asof_idx, + ): + raise NotImplementedError("create_overwrite_for_estimate") + + @property + @abstractmethod + def searchsorted_side(self): + return NotImplementedError("searchsorted_side") + + def get_requested_quarter_data( + self, + zero_qtr_data, + zeroth_quarter_idx, + stacked_last_per_qtr, + num_announcements, + dates, + ): + """Selects the requested data for each date. + + Parameters + ---------- + zero_qtr_data : pd.DataFrame + The 'time zero' data for each calendar date per sid. + zeroth_quarter_idx : pd.Index + An index of calendar dates, sid, and normalized quarters, for only + the rows that have a next or previous earnings estimate. + stacked_last_per_qtr : pd.DataFrame + The latest estimate known with the dates, normalized quarter, and + sid as the index. 
+ num_announcements : int + The number of announcements out the user requested relative to + each date in the calendar dates. + dates : pd.DatetimeIndex + The calendar dates for which estimates data is requested. + + Returns + -------- + requested_qtr_data : pd.DataFrame + The DataFrame with the latest values for the requested quarter + for all columns; `dates` are the index and columns are a MultiIndex + with sids at the top level and the dataset columns on the bottom. + """ + zero_qtr_data_idx = zero_qtr_data.index + requested_qtr_idx = pd.MultiIndex.from_arrays( + [ + zero_qtr_data_idx.get_level_values(0), + zero_qtr_data_idx.get_level_values(1), + self.get_shifted_qtrs( + zeroth_quarter_idx.get_level_values( + NORMALIZED_QUARTERS, + ), + num_announcements, + ), + ], + names=[ + zero_qtr_data_idx.names[0], + zero_qtr_data_idx.names[1], + SHIFTED_NORMALIZED_QTRS, + ], + ) + + requested_qtr_data = stacked_last_per_qtr.reindex(index=requested_qtr_idx) + requested_qtr_data = requested_qtr_data.reset_index( + SHIFTED_NORMALIZED_QTRS, + ) + # Calculate the actual year/quarter being requested and add those in + # as columns. + ( + requested_qtr_data[FISCAL_YEAR_FIELD_NAME], + requested_qtr_data[FISCAL_QUARTER_FIELD_NAME], + ) = split_normalized_quarters(requested_qtr_data[SHIFTED_NORMALIZED_QTRS]) + # Once we're left with just dates as the index, we can reindex by all + # dates so that we have a value for each calendar date. + return requested_qtr_data.unstack(SID_FIELD_NAME).reindex(dates) + + def get_split_adjusted_asof_idx(self, dates): + """Compute the index in `dates` where the split-adjusted-asof-date + falls. This is the date up to which, and including which, we will + need to unapply all adjustments for and then re-apply them as they + come in. After this date, adjustments are applied as normal. + + Parameters + ---------- + dates : pd.DatetimeIndex + The calendar dates over which the Pipeline is being computed. 
+ + Returns + ------- + split_adjusted_asof_idx : int + The index in `dates` at which the data should be split. + """ + split_adjusted_asof_idx = dates.searchsorted(self._split_adjusted_asof) + # make_utc_aware(pd.DatetimeIndex(self._split_adjusted_asof)) + # The split-asof date is after the date index. + if split_adjusted_asof_idx == len(dates): + split_adjusted_asof_idx = len(dates) - 1 + if self._split_adjusted_asof.tzinfo is not None: + if self._split_adjusted_asof < dates[0]: + split_adjusted_asof_idx = -1 + else: + if self._split_adjusted_asof < dates[0]: + split_adjusted_asof_idx = -1 + return split_adjusted_asof_idx + + def collect_overwrites_for_sid( + self, + group, + dates, + requested_qtr_data, + last_per_qtr, + sid_idx, + columns, + all_adjustments_for_sid, + sid, + ): + """Given a sid, collect all overwrites that should be applied for this + sid at each quarter boundary. + + Parameters + ---------- + group : pd.DataFrame + The data for `sid`. + dates : pd.DatetimeIndex + The calendar dates for which estimates data is requested. + requested_qtr_data : pd.DataFrame + The DataFrame with the latest values for the requested quarter + for all columns. + last_per_qtr : pd.DataFrame + A DataFrame with a column MultiIndex of [self.estimates.columns, + normalized_quarters, sid] that allows easily getting the timeline + of estimates for a particular sid for a particular quarter. + sid_idx : int + The sid's index in the asset index. + columns : list of BoundColumn + The columns for which the overwrites should be computed. + all_adjustments_for_sid : dict[int -> AdjustedArray] + A dictionary of the integer index of each timestamp into the date + index, mapped to adjustments that should be applied at that + index for the given sid (`sid`). This dictionary is modified as + adjustments are collected. + sid : int + The sid for which overwrites should be computed. 
+ """ + # If data was requested for only 1 date, there can never be any + # overwrites, so skip the extra work. + if len(dates) == 1: + return + + next_qtr_start_indices = dates.searchsorted( + pd.DatetimeIndex(group[EVENT_DATE_FIELD_NAME]), + side=self.searchsorted_side, + ) + + qtrs_with_estimates = group.index.get_level_values(NORMALIZED_QUARTERS).values + for idx in next_qtr_start_indices: + if 0 < idx < len(dates): + # Find the quarter being requested in the quarter we're + # crossing into. + requested_quarter = requested_qtr_data[ + SHIFTED_NORMALIZED_QTRS, + sid, + ].iloc[idx] + # Only add adjustments if the next quarter starts somewhere + # in our date index for this sid. Our 'next' quarter can + # never start at index 0; a starting index of 0 means that + # the next quarter's event date was NaT. + self.create_overwrites_for_quarter( + all_adjustments_for_sid, + idx, + last_per_qtr, + qtrs_with_estimates, + requested_quarter, + sid, + sid_idx, + columns, + ) + + def get_adjustments_for_sid( + self, + group, + dates, + requested_qtr_data, + last_per_qtr, + sid_to_idx, + columns, + col_to_all_adjustments, + **kwargs, + ): + """ + + Parameters + ---------- + group : pd.DataFrame + The data for the given sid. + dates : pd.DatetimeIndex + The calendar dates for which estimates data is requested. + requested_qtr_data : pd.DataFrame + The DataFrame with the latest values for the requested quarter + for all columns. + last_per_qtr : pd.DataFrame + A DataFrame with a column MultiIndex of [self.estimates.columns, + normalized_quarters, sid] that allows easily getting the timeline + of estimates for a particular sid for a particular quarter. + sid_to_idx : dict[int -> int] + A dictionary mapping sid to he sid's index in the asset index. + columns : list of BoundColumn + The columns for which the overwrites should be computed. 
+ col_to_all_adjustments : dict[int -> AdjustedArray] + A dictionary of the integer index of each timestamp into the date + index, mapped to adjustments that should be applied at that + index. This dictionary is for adjustments for ALL sids. It is + modified as adjustments are collected. + kwargs : + Additional arguments used in collecting adjustments; unused here. + """ + # Collect all adjustments for a given sid. + all_adjustments_for_sid = {} + sid = int(group.name) + self.collect_overwrites_for_sid( + group, + dates, + requested_qtr_data, + last_per_qtr, + sid_to_idx[sid], + columns, + all_adjustments_for_sid, + sid, + ) + self.merge_into_adjustments_for_all_sids( + all_adjustments_for_sid, col_to_all_adjustments + ) + + def merge_into_adjustments_for_all_sids( + self, all_adjustments_for_sid, col_to_all_adjustments + ): + """Merge adjustments for a particular sid into a dictionary containing + adjustments for all sids. + + Parameters + ---------- + all_adjustments_for_sid : dict[int -> AdjustedArray] + All adjustments for a particular sid. + col_to_all_adjustments : dict[int -> AdjustedArray] + All adjustments for all sids. + """ + + for col_name in all_adjustments_for_sid: + if col_name not in col_to_all_adjustments: + col_to_all_adjustments[col_name] = {} + for ts in all_adjustments_for_sid[col_name]: + adjs = all_adjustments_for_sid[col_name][ts] + add_new_adjustments(col_to_all_adjustments, adjs, col_name, ts) + + def get_adjustments( + self, + zero_qtr_data, + requested_qtr_data, + last_per_qtr, + dates, + assets, + columns, + **kwargs, + ): + """Creates an AdjustedArray from the given estimates data for the given + dates. + + Parameters + ---------- + zero_qtr_data : pd.DataFrame + The 'time zero' data for each calendar date per sid. + requested_qtr_data : pd.DataFrame + The requested quarter data for each calendar date per sid. 
+ last_per_qtr : pd.DataFrame + A DataFrame with a column MultiIndex of [self.estimates.columns, + normalized_quarters, sid] that allows easily getting the timeline + of estimates for a particular sid for a particular quarter. + dates : pd.DatetimeIndex + The calendar dates for which estimates data is requested. + assets : pd.Int64Index + An index of all the assets from the raw data. + columns : list of BoundColumn + The columns for which adjustments need to be calculated. + kwargs : + Additional keyword arguments that should be forwarded to + `get_adjustments_for_sid` and to be used in computing adjustments + for each sid. + + Returns + ------- + col_to_all_adjustments : dict[int -> AdjustedArray] + A dictionary of all adjustments that should be applied. + """ + + zero_qtr_data.sort_index(inplace=True) + # Here we want to get the LAST record from each group of records + # corresponding to a single quarter. This is to ensure that we select + # the most up-to-date event date in case the event date changes. + quarter_shifts = zero_qtr_data.groupby( + level=[SID_FIELD_NAME, NORMALIZED_QUARTERS] + ).nth(-1) + + col_to_all_adjustments = {} + sid_to_idx = dict(zip(assets, range(len(assets)))) + quarter_shifts.groupby(level=SID_FIELD_NAME).apply( + self.get_adjustments_for_sid, + dates, + requested_qtr_data, + last_per_qtr, + sid_to_idx, + columns, + col_to_all_adjustments, + **kwargs, + ) + return col_to_all_adjustments + + def create_overwrites_for_quarter( + self, + col_to_overwrites, + next_qtr_start_idx, + last_per_qtr, + quarters_with_estimates_for_sid, + requested_quarter, + sid, + sid_idx, + columns, + ): + """Add entries to the dictionary of columns to adjustments for the given + sid and the given quarter. + + Parameters + ---------- + col_to_overwrites : dict [column_name -> list of ArrayAdjustment] + A dictionary mapping column names to all overwrites for those + columns. 
+ next_qtr_start_idx : int + The index of the first day of the next quarter in the calendar + dates. + last_per_qtr : pd.DataFrame + A DataFrame with a column MultiIndex of [self.estimates.columns, + normalized_quarters, sid] that allows easily getting the timeline + of estimates for a particular sid for a particular quarter; this + is particularly useful for getting adjustments for 'next' + estimates. + quarters_with_estimates_for_sid : np.array + An array of all quarters for which there are estimates for the + given sid. + requested_quarter : float + The quarter for which the overwrite should be created. + sid : int + The sid for which to create overwrites. + sid_idx : int + The index of the sid in `assets`. + columns : list of BoundColumn + The columns for which to create overwrites. + """ + for col in columns: + column_name = self.name_map[col.name] + if column_name not in col_to_overwrites: + col_to_overwrites[column_name] = {} + # If there are estimates for the requested quarter, + # overwrite all values going up to the starting index of + # that quarter with estimates for that quarter. + if requested_quarter in quarters_with_estimates_for_sid: + adjs = self.create_overwrite_for_estimate( + col, + column_name, + last_per_qtr, + next_qtr_start_idx, + requested_quarter, + sid, + sid_idx, + ) + add_new_adjustments( + col_to_overwrites, adjs, column_name, next_qtr_start_idx + ) + # There are no estimates for the quarter. Overwrite all + # values going up to the starting index of that quarter + # with the missing value for this column. 
+ else: + adjs = [self.overwrite_with_null(col, next_qtr_start_idx, sid_idx)] + add_new_adjustments( + col_to_overwrites, adjs, column_name, next_qtr_start_idx + ) + + def overwrite_with_null(self, column, next_qtr_start_idx, sid_idx): + return self.scalar_overwrites_dict[column.dtype]( + 0, next_qtr_start_idx - 1, sid_idx, sid_idx, column.missing_value + ) + + async def load_adjusted_array(self, domain, columns, dates, sids, mask): + # Separate out getting the columns' datasets and the datasets' + # num_announcements attributes to ensure that we're catching the right + # AttributeError. + col_to_datasets = {col: col.dataset for col in columns} + try: + groups = groupby( + lambda col: col_to_datasets[col].num_announcements, col_to_datasets + ) + except AttributeError as exc: + raise AttributeError( + "Datasets loaded via the " + "EarningsEstimatesLoader must define a " + "`num_announcements` attribute that defines " + "how many quarters out the loader should load" + " the data relative to `dates`." + ) from exc + if any(num_qtr < 0 for num_qtr in groups): + raise ValueError( + INVALID_NUM_QTRS_MESSAGE + % ",".join(str(qtr) for qtr in groups if qtr < 0) + ) + out = {} + # To optimize performance, only work below on assets that are + # actually in the raw data. + data_query_cutoff_times = domain.data_query_cutoff_for_sessions(dates) + assets_with_data = set(sids) & set(self.estimates[SID_FIELD_NAME]) + last_per_qtr, stacked_last_per_qtr = self.get_last_data_per_qtr( + assets_with_data, + columns, + dates, + data_query_cutoff_times, + ) + # Determine which quarter is immediately next/previous for each + # date. 
+ zeroth_quarter_idx = self.get_zeroth_quarter_idx(stacked_last_per_qtr) + zero_qtr_data = stacked_last_per_qtr.loc[zeroth_quarter_idx] + + for num_announcements, columns in groups.items(): + requested_qtr_data = self.get_requested_quarter_data( + zero_qtr_data, + zeroth_quarter_idx, + stacked_last_per_qtr, + num_announcements, + dates, + ) + + # Calculate all adjustments for the given quarter and accumulate + # them for each column. + col_to_adjustments = self.get_adjustments( + zero_qtr_data, requested_qtr_data, last_per_qtr, dates, sids, columns + ) + + # Lookup the asset indexer once, this is so we can reindex + # the assets returned into the assets requested for each column. + # This depends on the fact that our column pd.MultiIndex has the same + # sids for each field. This allows us to do the lookup once on + # level 1 instead of doing the lookup each time per value in + # level 0. + # asset_indexer = sids.get_indexer_for( + # requested_qtr_data.columns.levels[1], + # ) + for col in columns: + column_name = self.name_map[col.name] + # allocate the empty output with the correct missing value + # shape = len(dates), len(sids) + # output_array = np.full(shape=shape, + # fill_value=col.missing_value, + # dtype=col.dtype) + # overwrite the missing value with values from the computed data + try: + output_array = ( + requested_qtr_data[column_name] + .reindex(sids, axis=1) + .to_numpy() + .astype(col.dtype) + ) + except Exception: + output_array = ( + requested_qtr_data[column_name] + .reindex(sids, axis=1) + .to_numpy(na_value=col.missing_value) + .astype(col.dtype) + ) + + # except ValueError: + # np.copyto(output_array[:, asset_indexer], + # requested_qtr_data[column_name].to_numpy(na_value=output_array.dtype), + # casting='unsafe') + out[col] = AdjustedArray( + output_array, + # There may not be any adjustments at all (e.g. if + # len(date) == 1), so provide a default. 
+ dict(col_to_adjustments.get(column_name, {})), + col.missing_value, + ) + return out + + def get_last_data_per_qtr( + self, assets_with_data, columns, dates, data_query_cutoff_times + ): + """Determine the last piece of information we know for each column on each + date in the index for each sid and quarter. + + Parameters + ---------- + assets_with_data : pd.Index + Index of all assets that appear in the raw data given to the + loader. + columns : iterable of BoundColumn + The columns that need to be loaded from the raw data. + data_query_cutoff_times : pd.DatetimeIndex + The calendar of dates for which data should be loaded. + + Returns + ------- + stacked_last_per_qtr : pd.DataFrame + A DataFrame indexed by [dates, sid, normalized_quarters] that has + the latest information for each row of the index, sorted by event + date. + last_per_qtr : pd.DataFrame + A DataFrame with columns that are a MultiIndex of [ + self.estimates.columns, normalized_quarters, sid]. + """ + # Get a DataFrame indexed by date with a MultiIndex of columns of + # [self.estimates.columns, normalized_quarters, sid], where each cell + # contains the latest data for that day. + last_per_qtr = last_in_date_group( + self.estimates, + data_query_cutoff_times, + assets_with_data, + reindex=True, + extra_groupers=[NORMALIZED_QUARTERS], + ) + last_per_qtr.index = dates + # Forward fill values for each quarter/sid/dataset column. + ffill_across_cols(last_per_qtr, columns, self.name_map) + # Stack quarter and sid into the index. 
+ stacked_last_per_qtr = last_per_qtr.stack( + [SID_FIELD_NAME, NORMALIZED_QUARTERS], + ) + # Set date index name for ease of reference + stacked_last_per_qtr.index.set_names( + SIMULATION_DATES, + level=0, + inplace=True, + ) + stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] = pd.to_datetime( + stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] + ) + stacked_last_per_qtr = stacked_last_per_qtr.sort_values(EVENT_DATE_FIELD_NAME) + return last_per_qtr, stacked_last_per_qtr + + + + diff --git a/ziplime/pipeline/loaders/equity_pricing_loader.py b/ziplime/pipeline/loaders/equity_pricing_loader.py index a27599b7ba..b6196ee96c 100644 --- a/ziplime/pipeline/loaders/equity_pricing_loader.py +++ b/ziplime/pipeline/loaders/equity_pricing_loader.py @@ -19,10 +19,12 @@ from ziplime.lib.adjusted_array import AdjustedArray from ziplime.utils.numpy_utils import repeat_first_axis -from .base import PipelineLoader +from .pipeline_loader import PipelineLoader from .utils import shift_dates +from .. import Domain from ..data.equity_pricing import EquityPricing -from ...data.domain.data_bundle import DataBundle +from ...assets.services.asset_service import AssetService +from ...data.services.data_source import DataSource UINT32_MAX = iinfo(uint32).max @@ -40,13 +42,13 @@ class EquityPricingLoader(PipelineLoader): Reader providing currency conversions. """ - def __init__(self, raw_price_reader, adjustments_reader, fx_reader): - self.raw_price_reader = raw_price_reader - self.adjustments_reader = adjustments_reader + def __init__(self, data_source: DataSource, asset_service: AssetService, fx_reader): + self.data_source = data_source + self.asset_service = asset_service self.fx_reader = fx_reader @classmethod - def without_fx(cls, data_bundle: DataBundle): + def without_fx(cls, data_source: DataSource, asset_service: AssetService): """ Construct an EquityPricingLoader without support for fx rates. 
@@ -66,12 +68,12 @@ def without_fx(cls, data_bundle: DataBundle): A loader that can only provide currency-naive data. """ return cls( - raw_price_reader=None, # fix this - adjustments_reader=None, + data_source=data_source, # fix this fx_reader=ExplodingFXRateReader(), + asset_service=asset_service ) - def load_adjusted_array(self, domain, columns, dates, sids, mask): + async def load_adjusted_array(self, domain: Domain, columns, dates, sids, mask): # load_adjusted_array is called with dates on which the user's algo # will be shown data, which means we need to return the data that would # be known at the **start** of each date. We assume that the latest @@ -81,36 +83,39 @@ def load_adjusted_array(self, domain, columns, dates, sids, mask): shifted_dates = shift_dates(sessions, dates[0], dates[-1], shift=1) ohlcv_cols, currency_cols = self._split_column_types(columns) - del columns # From here on we should use ohlcv_cols or currency_cols. + # del columns # From here on we should use ohlcv_cols or currency_cols. ohlcv_colnames = [c.name for c in ohlcv_cols] - raw_ohlcv_arrays = self.raw_price_reader.load_raw_arrays( - ohlcv_colnames, - shifted_dates[0], - shifted_dates[-1], - sids, + raw_ohlcv_arrays = self.data_source.get_data_by_date_and_sids( + fields=frozenset(ohlcv_colnames), + start_date=shifted_dates[0].tz_localize(domain.calendar.tz).to_pydatetime(), + end_date=shifted_dates[-1].tz_localize(domain.calendar.tz).to_pydatetime(), + sids=list(sids), + frequency=self.data_source.frequency, + include_bounds=True ) # Currency convert raw_arrays in place if necessary. We use shifted # dates to load currency conversion rates to make them line up with # dates used to fetch prices. 
- self._inplace_currency_convert( - ohlcv_cols, - raw_ohlcv_arrays, - shifted_dates, - sids, - ) - - adjustments = self.adjustments_reader.load_pricing_adjustments( + # TODO: set currency conversion + # self._inplace_currency_convert( + # ohlcv_cols, + # raw_ohlcv_arrays, + # shifted_dates, + # sids, + # ) + + adjustments = await self.asset_service.load_pricing_adjustments( ohlcv_colnames, dates, sids, ) - + ohlcv_arrays = [raw_ohlcv_arrays[c.name] for c in ohlcv_cols] out = {} - for c, c_raw, c_adjs in zip(ohlcv_cols, raw_ohlcv_arrays, adjustments): + for c, c_raw, c_adjs in zip(ohlcv_cols, ohlcv_arrays, adjustments): out[c] = AdjustedArray( - c_raw.astype(c.dtype), + c_raw.to_numpy().astype(c.dtype), c_adjs, c.missing_value, ) @@ -123,7 +128,6 @@ def load_adjusted_array(self, domain, columns, dates, sids, mask): adjustments={}, missing_value=None, ) - return out @property @@ -208,6 +212,3 @@ def _split_column_types(self, columns): return ohlcv, currency - -# Backwards compat alias. -USEquityPricingLoader = EquityPricingLoader diff --git a/ziplime/pipeline/loaders/events.py b/ziplime/pipeline/loaders/events.py index 5965264c48..5e0308aa04 100644 --- a/ziplime/pipeline/loaders/events.py +++ b/ziplime/pipeline/loaders/events.py @@ -3,13 +3,13 @@ from toolz import groupby, merge -from .base import PipelineLoader +from .pipeline_loader import PipelineLoader from ziplime.pipeline.common import ( EVENT_DATE_FIELD_NAME, SID_FIELD_NAME, TS_FIELD_NAME, ) -from ziplime.pipeline.loaders.frame import DataFrameLoader +from ziplime.pipeline.loaders.data_frame_loader import DataFrameLoader from ziplime.pipeline.loaders.utils import ( next_event_indexer, previous_event_indexer, @@ -159,11 +159,11 @@ def previous_event_indexer(self, data_query_time, sids): self.events[SID_FIELD_NAME], ) - def load_next_events(self, domain, columns, dates, data_query_time, sids, mask): + async def load_next_events(self, domain, columns, dates, data_query_time, sids, mask): if not columns: return {} 
- return self._load_events( + return await self._load_events( name_map=self.next_value_columns, indexer=self.next_event_indexer(dates, data_query_time, sids), domain=domain, @@ -173,11 +173,11 @@ def load_next_events(self, domain, columns, dates, data_query_time, sids, mask): mask=mask, ) - def load_previous_events(self, domain, columns, dates, data_query_time, sids, mask): + async def load_previous_events(self, domain, columns, dates, data_query_time, sids, mask): if not columns: return {} - return self._load_events( + return await self._load_events( name_map=self.previous_value_columns, indexer=self.previous_event_indexer(data_query_time, sids), domain=domain, @@ -187,7 +187,7 @@ def load_previous_events(self, domain, columns, dates, data_query_time, sids, ma mask=mask, ) - def _load_events(self, name_map, indexer, domain, columns, dates, sids, mask): + async def _load_events(self, name_map, indexer, domain, columns, dates, sids, mask): def to_frame(array): return pd.DataFrame(array, index=dates, columns=sids) @@ -221,7 +221,7 @@ def to_frame(array): # Delegate the actual array formatting logic to a DataFrameLoader. 
loader = DataFrameLoader(c, to_frame(raw), adjustments=None) - out[c] = loader.load_adjusted_array( + out[c] = await loader.load_adjusted_array( domain, [c], dates, @@ -230,10 +230,10 @@ def to_frame(array): )[c] return out - def load_adjusted_array(self, domain, columns, dates, sids, mask): + async def load_adjusted_array(self, domain, columns, dates, sids, mask): data_query = domain.data_query_cutoff_for_sessions(dates) n, p = self.split_next_and_previous_event_columns(columns) return merge( - self.load_next_events(domain, n, dates, data_query, sids, mask), - self.load_previous_events(domain, p, dates, data_query, sids, mask), + await self.load_next_events(domain, n, dates, data_query, sids, mask), + await self.load_previous_events(domain, p, dates, data_query, sids, mask), ) diff --git a/ziplime/pipeline/loaders/eye_loader.py b/ziplime/pipeline/loaders/eye_loader.py new file mode 100644 index 0000000000..475d2f1870 --- /dev/null +++ b/ziplime/pipeline/loaders/eye_loader.py @@ -0,0 +1,26 @@ +import numpy as np + +from ziplime.pipeline.loaders.precomputed_loader import PrecomputedLoader + + +class EyeLoader(PrecomputedLoader): + """A PrecomputedLoader that emits arrays containing 1s on the diagonal and 0s + elsewhere. + + Parameters + ---------- + columns : list[BoundColumn] + Columns that this loader should know about. + dates : iterable[datetime-like] + Same as PrecomputedLoader. 
+    sids : iterable[int-like]
+        Same as PrecomputedLoader
+    """
+
+    def __init__(self, columns, dates, sids):
+        shape = (len(dates), len(sids))
+        # np.eye takes N (rows) and M (cols) as separate integer arguments,
+        # not a shape tuple -- unpack it or construction raises TypeError.
+        super(EyeLoader, self).__init__(
+            {column: np.eye(*shape, dtype=column.dtype) for column in columns},
+            dates,
+            sids,
+        )
diff --git a/ziplime/pipeline/loaders/next_earnings_estimates_loader.py b/ziplime/pipeline/loaders/next_earnings_estimates_loader.py
new file mode 100644
index 0000000000..cb5baae050
--- /dev/null
+++ b/ziplime/pipeline/loaders/next_earnings_estimates_loader.py
@@ -0,0 +1,73 @@
+import pandas as pd
+
+from ziplime.pipeline.common import (
+    EVENT_DATE_FIELD_NAME,
+    SID_FIELD_NAME, SIMULATION_DATES,
+)
+from ziplime.pipeline.loaders.earnings_estimates_loader import EarningsEstimatesLoader
+
+
+class NextEarningsEstimatesLoader(EarningsEstimatesLoader):
+    searchsorted_side = "right"
+
+    def create_overwrite_for_estimate(
+        self,
+        column,
+        column_name,
+        last_per_qtr,
+        next_qtr_start_idx,
+        requested_quarter,
+        sid,
+        sid_idx,
+        col_to_split_adjustments=None,
+        split_adjusted_asof_idx=None,
+    ):
+        return [
+            self.array_overwrites_dict[column.dtype](
+                0,
+                next_qtr_start_idx - 1,
+                sid_idx,
+                sid_idx,
+                last_per_qtr[
+                    column_name,
+                    requested_quarter,
+                    sid,
+                ].values[:next_qtr_start_idx],
+            )
+        ]
+
+    def get_shifted_qtrs(self, zero_qtrs, num_announcements):
+        return zero_qtrs + (num_announcements - 1)
+
+    def get_zeroth_quarter_idx(self, stacked_last_per_qtr):
+        """Filters for releases that are on or after each simulation date and
+        determines the next quarter by picking out the upcoming release for
+        each date in the index.
+
+        Parameters
+        ----------
+        stacked_last_per_qtr : pd.DataFrame
+            A DataFrame with index of calendar dates, sid, and normalized
+            quarters with each row being the latest estimate for the row's
+            index values, sorted by event date.
+ + Returns + ------- + next_releases_per_date_index : pd.MultiIndex + An index of calendar dates, sid, and normalized quarters, for only + the rows that have a next event. + """ + next_releases_per_date = ( + stacked_last_per_qtr.loc[ + stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] + >= stacked_last_per_qtr.index.get_level_values(SIMULATION_DATES) + ] + .groupby( + level=[SIMULATION_DATES, SID_FIELD_NAME], + as_index=False, + # Here we take advantage of the fact that `stacked_last_per_qtr` is + # sorted by event date. + ) + .nth(0) + ) + return next_releases_per_date.index diff --git a/ziplime/pipeline/loaders/next_split_adjusted_earnings_estimates_loader.py b/ziplime/pipeline/loaders/next_split_adjusted_earnings_estimates_loader.py new file mode 100644 index 0000000000..114c587cdb --- /dev/null +++ b/ziplime/pipeline/loaders/next_split_adjusted_earnings_estimates_loader.py @@ -0,0 +1,129 @@ +from ziplime.lib.adjustment import Float64Multiply +from ziplime.pipeline.common import SHIFTED_NORMALIZED_QTRS +from ziplime.pipeline.loaders.next_earnings_estimates_loader import NextEarningsEstimatesLoader +from ziplime.pipeline.loaders.split_adjusted_estimates_loader import SplitAdjustedEstimatesLoader + + +class NextSplitAdjustedEarningsEstimatesLoader( + SplitAdjustedEstimatesLoader, NextEarningsEstimatesLoader +): + def collect_split_adjustments( + self, + adjustments_for_sid, + requested_qtr_data, + dates, + sid, + sid_idx, + sid_estimates, + split_adjusted_asof_idx, + pre_adjustments, + post_adjustments, + requested_split_adjusted_columns, + ): + """Collect split adjustments for future quarters. Re-apply adjustments + that would be overwritten by overwrites. Merge split adjustments with + overwrites into the given dictionary of splits for the given sid. + + Parameters + ---------- + adjustments_for_sid : dict[str -> dict[int -> list]] + The dictionary of adjustments to which splits need to be added. + Initially it contains only overwrites. 
+ requested_qtr_data : pd.DataFrame + The requested quarter data for each calendar date per sid. + dates : pd.DatetimeIndex + The calendar dates for which estimates data is requested. + sid : int + The sid for which adjustments need to be collected. + sid_idx : int + The index of `sid` in the adjusted array. + sid_estimates : pd.DataFrame + The raw estimates data for the given sid. + split_adjusted_asof_idx : int + The index in `dates` as-of which the data is split adjusted. + pre_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) + The adjustment values and indexes in `dates` for + adjustments that happened before the split-asof-date. + post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) + The adjustment values, indexes in `dates`, and timestamps for + adjustments that happened after the split-asof-date. + requested_split_adjusted_columns : list of str + List of requested split adjusted column names. + """ + (pre_adjustments_dict, post_adjustments_dict) = self._collect_adjustments( + requested_qtr_data, + sid, + sid_idx, + sid_estimates, + split_adjusted_asof_idx, + pre_adjustments, + post_adjustments, + requested_split_adjusted_columns, + ) + for column_name in requested_split_adjusted_columns: + for overwrite_ts in adjustments_for_sid[column_name]: + # We need to cumulatively re-apply all adjustments up to the + # split-adjusted-asof-date. We might not have any + # pre-adjustments, so we should check for that. + if overwrite_ts <= split_adjusted_asof_idx and pre_adjustments_dict: + for split_ts in pre_adjustments_dict[column_name]: + # The split has to have occurred during the span of + # the overwrite. + if split_ts < overwrite_ts: + # Create new adjustments here so that we can + # re-apply all applicable adjustments to ONLY + # the dates being overwritten. 
+ adjustments_for_sid[column_name][overwrite_ts].extend( + [ + Float64Multiply( + 0, + overwrite_ts - 1, + sid_idx, + sid_idx, + adjustment.value, + ) + for adjustment in pre_adjustments_dict[column_name][ + split_ts + ] + ] + ) + # After the split-adjusted-asof-date, we need to re-apply all + # adjustments that occur after that date and within the + # bounds of the overwrite. They need to be applied starting + # from the first date and until an end date. The end date is + # the date of the newest information we get about + # `requested_quarter` that is >= `split_ts`, or if there is no + # new knowledge before `overwrite_ts`, then it is the date + # before `overwrite_ts`. + else: + # Overwrites happen at the first index of a new quarter, + # so determine here which quarter that is. + requested_quarter = requested_qtr_data[ + SHIFTED_NORMALIZED_QTRS, sid + ].iloc[overwrite_ts] + + for adjustment_value, date_index, timestamp in zip( + *post_adjustments + ): + if split_adjusted_asof_idx < date_index < overwrite_ts: + # Assume the entire overwrite contains stale data + upper_bound = overwrite_ts - 1 + end_idx = self.determine_end_idx_for_adjustment( + timestamp, + dates, + upper_bound, + requested_quarter, + sid_estimates, + ) + adjustments_for_sid[column_name][overwrite_ts].append( + Float64Multiply( + 0, end_idx, sid_idx, sid_idx, adjustment_value + ) + ) + + self.merge_split_adjustments_with_overwrites( + pre_adjustments_dict, + post_adjustments_dict, + adjustments_for_sid, + requested_split_adjusted_columns, + ) diff --git a/ziplime/pipeline/loaders/base.py b/ziplime/pipeline/loaders/pipeline_loader.py similarity index 94% rename from ziplime/pipeline/loaders/base.py rename to ziplime/pipeline/loaders/pipeline_loader.py index 973bb08996..beba4db71a 100644 --- a/ziplime/pipeline/loaders/base.py +++ b/ziplime/pipeline/loaders/pipeline_loader.py @@ -6,7 +6,7 @@ class PipelineLoader: """Interface for PipelineLoaders.""" - def load_adjusted_array(self, domain, columns, 
dates, sids, mask): + async def load_adjusted_array(self, domain, columns, dates, sids, mask): """ Load data for ``columns`` as AdjustedArrays. diff --git a/ziplime/pipeline/loaders/precomputed_loader.py b/ziplime/pipeline/loaders/precomputed_loader.py new file mode 100644 index 0000000000..7db9bfe125 --- /dev/null +++ b/ziplime/pipeline/loaders/precomputed_loader.py @@ -0,0 +1,56 @@ +from pandas import DataFrame + +from ziplime.pipeline.loaders.data_frame_loader import DataFrameLoader +from ziplime.pipeline.loaders.pipeline_loader import PipelineLoader + + +class PrecomputedLoader(PipelineLoader): + """Synthetic PipelineLoader that uses a pre-computed array for each column. + + Parameters + ---------- + values : dict + Map from column to values to use for that column. + Values can be anything that can be passed as the first positional + argument to a DataFrame whose indices are ``dates`` and ``sids`` + dates : iterable[datetime-like] + Row labels for input data. Can be anything that pd.DataFrame will + coerce to a DatetimeIndex. + sids : iterable[int-like] + Column labels for input data. Can be anything that pd.DataFrame will + coerce to an Int64Index. + + Notes + ----- + Adjustments are unsupported by this loader. 
+    """
+
+    def __init__(self, constants, dates, sids):
+        loaders = {}
+        for column, const in constants.items():
+            frame = DataFrame(
+                const,
+                index=dates,
+                columns=sids,
+                dtype=column.dtype,
+            )
+            loaders[column] = DataFrameLoader(
+                column=column,
+                baseline=frame,
+                adjustments=None,
+            )
+
+        self._loaders = loaders
+
+    async def load_adjusted_array(self, domain, columns, dates, sids, mask):
+        """Load by delegating to sub-loaders."""
+        out = {}
+        for col in columns:
+            try:
+                loader = self._loaders.get(col)
+                if loader is None:
+                    loader = self._loaders[col.unspecialize()]
+            except KeyError as exc:
+                raise ValueError("Couldn't find loader for %s" % col) from exc
+            # DataFrameLoader.load_adjusted_array is a coroutine in this
+            # refactor (see events.py) -- it must be awaited, otherwise
+            # dict.update receives a coroutine and raises TypeError.
+            out.update(await loader.load_adjusted_array(domain, [col], dates, sids, mask))
+        return out
diff --git a/ziplime/pipeline/loaders/previous_earnings_estimates_loader.py b/ziplime/pipeline/loaders/previous_earnings_estimates_loader.py
new file mode 100644
index 0000000000..91d7f15f95
--- /dev/null
+++ b/ziplime/pipeline/loaders/previous_earnings_estimates_loader.py
@@ -0,0 +1,63 @@
+from ziplime.pipeline.common import SID_FIELD_NAME, EVENT_DATE_FIELD_NAME, SIMULATION_DATES
+from ziplime.pipeline.loaders.earnings_estimates_loader import EarningsEstimatesLoader
+
+
+class PreviousEarningsEstimatesLoader(EarningsEstimatesLoader):
+    searchsorted_side = "left"
+
+    def create_overwrite_for_estimate(
+        self,
+        column,
+        column_name,
+        dates,
+        next_qtr_start_idx,
+        requested_quarter,
+        sid,
+        sid_idx,
+        col_to_split_adjustments=None,
+        split_adjusted_asof_idx=None,
+        split_dict=None,
+    ):
+        return [
+            self.overwrite_with_null(
+                column,
+                next_qtr_start_idx,
+                sid_idx,
+            )
+        ]
+
+    def get_shifted_qtrs(self, zero_qtrs, num_announcements):
+        return zero_qtrs - (num_announcements - 1)
+
+    def get_zeroth_quarter_idx(self, stacked_last_per_qtr):
+        """Filters for releases that are on or before each simulation date and
+        determines the previous quarter by picking out the most recent
+        release relative to each date in the 
index. + + Parameters + ---------- + stacked_last_per_qtr : pd.DataFrame + A DataFrame with index of calendar dates, sid, and normalized + quarters with each row being the latest estimate for the row's + index values, sorted by event date. + + Returns + ------- + previous_releases_per_date_index : pd.MultiIndex + An index of calendar dates, sid, and normalized quarters, for only + the rows that have a previous event. + """ + previous_releases_per_date = ( + stacked_last_per_qtr.loc[ + stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] + <= stacked_last_per_qtr.index.get_level_values(SIMULATION_DATES) + ] + .groupby( + level=[SIMULATION_DATES, SID_FIELD_NAME], + as_index=False, + # Here we take advantage of the fact that `stacked_last_per_qtr` is + # sorted by event date. + ) + .nth(-1) + ) + return previous_releases_per_date.index diff --git a/ziplime/pipeline/loaders/previous_split_adjusted_earnings_estimates_loader.py b/ziplime/pipeline/loaders/previous_split_adjusted_earnings_estimates_loader.py new file mode 100644 index 0000000000..e47f39e114 --- /dev/null +++ b/ziplime/pipeline/loaders/previous_split_adjusted_earnings_estimates_loader.py @@ -0,0 +1,67 @@ +from ziplime.pipeline.loaders.previous_earnings_estimates_loader import PreviousEarningsEstimatesLoader +from ziplime.pipeline.loaders.split_adjusted_estimates_loader import SplitAdjustedEstimatesLoader + + +class PreviousSplitAdjustedEarningsEstimatesLoader( + SplitAdjustedEstimatesLoader, PreviousEarningsEstimatesLoader +): + def collect_split_adjustments( + self, + adjustments_for_sid, + requested_qtr_data, + dates, + sid, + sid_idx, + sid_estimates, + split_adjusted_asof_idx, + pre_adjustments, + post_adjustments, + requested_split_adjusted_columns, + ): + """Collect split adjustments for previous quarters and apply them to the + given dictionary of splits for the given sid. 
Since overwrites just + replace all estimates before the new quarter with NaN, we don't need to + worry about re-applying split adjustments. + + Parameters + ---------- + adjustments_for_sid : dict[str -> dict[int -> list]] + The dictionary of adjustments to which splits need to be added. + Initially it contains only overwrites. + requested_qtr_data : pd.DataFrame + The requested quarter data for each calendar date per sid. + dates : pd.DatetimeIndex + The calendar dates for which estimates data is requested. + sid : int + The sid for which adjustments need to be collected. + sid_idx : int + The index of `sid` in the adjusted array. + sid_estimates : pd.DataFrame + The raw estimates data for the given sid. + split_adjusted_asof_idx : int + The index in `dates` as-of which the data is split adjusted. + pre_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) + The adjustment values and indexes in `dates` for + adjustments that happened before the split-asof-date. + post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) + The adjustment values, indexes in `dates`, and timestamps for + adjustments that happened after the split-asof-date. + requested_split_adjusted_columns : list of str + List of requested split adjusted column names. 
+ """ + (pre_adjustments_dict, post_adjustments_dict) = self._collect_adjustments( + requested_qtr_data, + sid, + sid_idx, + sid_estimates, + split_adjusted_asof_idx, + pre_adjustments, + post_adjustments, + requested_split_adjusted_columns, + ) + self.merge_split_adjustments_with_overwrites( + pre_adjustments_dict, + post_adjustments_dict, + adjustments_for_sid, + requested_split_adjusted_columns, + ) diff --git a/ziplime/pipeline/loaders/seed_random_loader.py b/ziplime/pipeline/loaders/seed_random_loader.py new file mode 100644 index 0000000000..e1235f7f7a --- /dev/null +++ b/ziplime/pipeline/loaders/seed_random_loader.py @@ -0,0 +1,89 @@ + +from numpy.random import RandomState +from pandas import Timestamp + + +from ziplime.utils.numpy_utils import ( + bool_dtype, + datetime64ns_dtype, + float64_dtype, + int64_dtype, + object_dtype, +) +from .precomputed_loader import PrecomputedLoader +from ...assets.repositories.sqlalchemy_adjustments_repository import SqlAlchemyAdjustmentRepository + + +class SeededRandomLoader(PrecomputedLoader): + """A PrecomputedLoader that emits arrays randomly-generated with a given seed. + + Parameters + ---------- + seed : int + Seed for numpy.random.RandomState. + columns : list[BoundColumn] + Columns that this loader should know about. + dates : iterable[datetime-like] + Same as PrecomputedLoader. 
+ sids : iterable[int-like] + Same as PrecomputedLoader + """ + + def __init__(self, seed, columns, dates, sids): + self._seed = seed + super(SeededRandomLoader, self).__init__( + {c: self.values(c.dtype, dates, sids) for c in columns}, + dates, + sids, + ) + + def values(self, dtype, dates, sids): + """Make a random array of shape (len(dates), len(sids)) with ``dtype``.""" + shape = (len(dates), len(sids)) + return { + datetime64ns_dtype: self._datetime_values, + float64_dtype: self._float_values, + int64_dtype: self._int_values, + bool_dtype: self._bool_values, + object_dtype: self._object_values, + }[dtype](shape) + + @property + def state(self): + """Make a new RandomState from our seed. + + This ensures that every call to _*_values produces the same output + every time for a given SeededRandomLoader instance. + """ + return RandomState(self._seed) + + def _float_values(self, shape): + """Return uniformly-distributed floats between -0.0 and 100.0.""" + return self.state.uniform(low=0.0, high=100.0, size=shape) + + def _int_values(self, shape): + """ + Return uniformly-distributed integers between 0 and 100. 
+ """ + return self.state.randint(low=0, high=100, size=shape).astype( + "int64" + ) # default is system int + + def _datetime_values(self, shape): + """Return uniformly-distributed dates in 2014.""" + start = Timestamp("2014", tz="UTC").asm8 + offsets = self.state.randint( + low=0, + high=364, + size=shape, + ).astype("timedelta64[D]") + return start + offsets + + def _bool_values(self, shape): + """Return uniformly-distributed True/False values.""" + return self.state.randn(*shape) < 0 + + def _object_values(self, shape): + res = self._int_values(shape).astype(str).astype(object) + return res + diff --git a/ziplime/pipeline/loaders/split_adjusted_estimates_loader.py b/ziplime/pipeline/loaders/split_adjusted_estimates_loader.py new file mode 100644 index 0000000000..d473363045 --- /dev/null +++ b/ziplime/pipeline/loaders/split_adjusted_estimates_loader.py @@ -0,0 +1,471 @@ +from abc import abstractmethod + +import numpy as np +import pandas as pd +from ziplime.lib.adjustment import ( + Float64Multiply, +) + +from ziplime.pipeline.common import ( + SID_FIELD_NAME, + TS_FIELD_NAME, + NORMALIZED_QUARTERS, SHIFTED_NORMALIZED_QTRS, +) +from ziplime.pipeline.loaders.earnings_estimates_loader import EarningsEstimatesLoader +from ziplime.pipeline.loaders.utils import validate_split_adjusted_column_specs, add_new_adjustments + + +class SplitAdjustedEstimatesLoader(EarningsEstimatesLoader): + """Estimates loader that loads data that needs to be split-adjusted. + + Parameters + ---------- + split_adjustments_loader : SQLiteAdjustmentReader + The loader to use for reading split adjustments. + split_adjusted_column_names : iterable of str + The column names that should be split-adjusted. + split_adjusted_asof : pd.Timestamp + The date that separates data into 2 halves: the first half is the set + of dates up to and including the split_adjusted_asof date. All + adjustments occurring during this first half are applied to all + dates in this first half. 
The second half is the set of dates after + the split_adjusted_asof date. All adjustments occurring during this + second half are applied sequentially as they appear in the timeline. + """ + + def __init__( + self, + estimates, + name_map, + split_adjustments_loader, + split_adjusted_column_names, + split_adjusted_asof, + ): + validate_split_adjusted_column_specs(name_map, split_adjusted_column_names) + self._split_adjustments = split_adjustments_loader + self._split_adjusted_column_names = split_adjusted_column_names + self._split_adjusted_asof = split_adjusted_asof + self._split_adjustment_dict = {} + super(SplitAdjustedEstimatesLoader, self).__init__(estimates, name_map) + + @abstractmethod + def collect_split_adjustments( + self, + adjustments_for_sid, + requested_qtr_data, + dates, + sid, + sid_idx, + sid_estimates, + split_adjusted_asof_idx, + pre_adjustments, + post_adjustments, + requested_split_adjusted_columns, + ): + raise NotImplementedError("collect_split_adjustments") + + def get_adjustments_for_sid( + self, + group, + dates, + requested_qtr_data, + last_per_qtr, + sid_to_idx, + columns, + col_to_all_adjustments, + split_adjusted_asof_idx=None, + split_adjusted_cols_for_group=None, + ): + """Collects both overwrites and adjustments for a particular sid. + + Parameters + ---------- + split_adjusted_asof_idx : int + The integer index of the date on which the data was split-adjusted. + split_adjusted_cols_for_group : list of str + The names of requested columns that should also be split-adjusted. 
+ """ + all_adjustments_for_sid = {} + sid = int(group.name) + self.collect_overwrites_for_sid( + group, + dates, + requested_qtr_data, + last_per_qtr, + sid_to_idx[sid], + columns, + all_adjustments_for_sid, + sid, + ) + ( + pre_adjustments, + post_adjustments, + ) = self.retrieve_split_adjustment_data_for_sid( + dates, sid, split_adjusted_asof_idx + ) + sid_estimates = self.estimates[self.estimates[SID_FIELD_NAME] == sid] + # We might not have any overwrites but still have + # adjustments, and we will need to manually add columns if + # that is the case. + for col_name in split_adjusted_cols_for_group: + if col_name not in all_adjustments_for_sid: + all_adjustments_for_sid[col_name] = {} + + self.collect_split_adjustments( + all_adjustments_for_sid, + requested_qtr_data, + dates, + sid, + sid_to_idx[sid], + sid_estimates, + split_adjusted_asof_idx, + pre_adjustments, + post_adjustments, + split_adjusted_cols_for_group, + ) + self.merge_into_adjustments_for_all_sids( + all_adjustments_for_sid, col_to_all_adjustments + ) + + def get_adjustments( + self, + zero_qtr_data, + requested_qtr_data, + last_per_qtr, + dates, + assets, + columns, + **kwargs, + ): + """Calculates both split adjustments and overwrites for all sids.""" + split_adjusted_cols_for_group = [ + self.name_map[col.name] + for col in columns + if self.name_map[col.name] in self._split_adjusted_column_names + ] + # Add all splits to the adjustment dict for this sid. 
+ split_adjusted_asof_idx = self.get_split_adjusted_asof_idx(dates) + return super(SplitAdjustedEstimatesLoader, self).get_adjustments( + zero_qtr_data, + requested_qtr_data, + last_per_qtr, + dates, + assets, + columns, + split_adjusted_cols_for_group=split_adjusted_cols_for_group, + split_adjusted_asof_idx=split_adjusted_asof_idx, + ) + + def determine_end_idx_for_adjustment( + self, adjustment_ts, dates, upper_bound, requested_quarter, sid_estimates + ): + """Determines the date until which the adjustment at the given date + index should be applied for the given quarter. + + Parameters + ---------- + adjustment_ts : pd.Timestamp + The timestamp at which the adjustment occurs. + dates : pd.DatetimeIndex + The calendar dates over which the Pipeline is being computed. + upper_bound : int + The index of the upper bound in the calendar dates. This is the + index until which the adjusment will be applied unless there is + information for the requested quarter that comes in on or before + that date. + requested_quarter : float + The quarter for which we are determining how the adjustment + should be applied. + sid_estimates : pd.DataFrame + The DataFrame of estimates data for the sid for which we're + applying the given adjustment. + + Returns + ------- + end_idx : int + The last index to which the adjustment should be applied for the + given quarter/sid. + """ + end_idx = upper_bound + # Find the next newest kd that happens on or after + # the date of this adjustment + newest_kd_for_qtr = sid_estimates[ + (sid_estimates[NORMALIZED_QUARTERS] == requested_quarter) + & (sid_estimates[TS_FIELD_NAME] >= adjustment_ts) + ][TS_FIELD_NAME].min() + if pd.notnull(newest_kd_for_qtr): + newest_kd_idx = dates.searchsorted(newest_kd_for_qtr) + # make_utc_aware(pd.DatetimeIndex(newest_kd_for_qtr)) + # We have fresh information that comes in + # before the end of the overwrite and + # presumably is already split-adjusted to the + # current split. 
We should stop applying the + # adjustment the day before this new + # information comes in. + if newest_kd_idx <= upper_bound: + end_idx = newest_kd_idx - 1 + return end_idx + + def collect_pre_split_asof_date_adjustments( + self, + split_adjusted_asof_date_idx, + sid_idx, + pre_adjustments, + requested_split_adjusted_columns, + ): + """Collect split adjustments that occur before the + split-adjusted-asof-date. All those adjustments must first be + UN-applied at the first date index and then re-applied on the + appropriate dates in order to match point in time share pricing data. + + Parameters + ---------- + split_adjusted_asof_date_idx : int + The index in the calendar dates as-of which all data was + split-adjusted. + sid_idx : int + The index of the sid for which adjustments should be collected in + the adjusted array. + pre_adjustments : tuple(list(float), list(int)) + The adjustment values, indexes in `dates`, and timestamps for + adjustments that happened after the split-asof-date. + requested_split_adjusted_columns : list of str + The requested split adjusted columns. + + Returns + ------- + col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]] + The adjustments for this sid that occurred on or before the + split-asof-date. + """ + col_to_split_adjustments = {} + if len(pre_adjustments[0]): + adjustment_values, date_indexes = pre_adjustments + for column_name in requested_split_adjusted_columns: + col_to_split_adjustments[column_name] = {} + # We need to undo all adjustments that happen before the + # split_asof_date here by reversing the split ratio. 
+ col_to_split_adjustments[column_name][0] = [ + Float64Multiply( + 0, + split_adjusted_asof_date_idx, + sid_idx, + sid_idx, + 1 / future_adjustment, + ) + for future_adjustment in adjustment_values + ] + + for adjustment, date_index in zip(adjustment_values, date_indexes): + adj = Float64Multiply( + 0, split_adjusted_asof_date_idx, sid_idx, sid_idx, adjustment + ) + add_new_adjustments( + col_to_split_adjustments, [adj], column_name, date_index + ) + + return col_to_split_adjustments + + def collect_post_asof_split_adjustments( + self, + post_adjustments, + requested_qtr_data, + sid, + sid_idx, + sid_estimates, + requested_split_adjusted_columns, + ): + """Collect split adjustments that occur after the + split-adjusted-asof-date. Each adjustment needs to be applied to all + dates on which knowledge for the requested quarter was older than the + date of the adjustment. + + Parameters + ---------- + post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) + The adjustment values, indexes in `dates`, and timestamps for + adjustments that happened after the split-asof-date. + requested_qtr_data : pd.DataFrame + The requested quarter data for each calendar date per sid. + sid : int + The sid for which adjustments need to be collected. + sid_idx : int + The index of `sid` in the adjusted array. + sid_estimates : pd.DataFrame + The raw estimates data for this sid. + requested_split_adjusted_columns : list of str + The requested split adjusted columns. + Returns + ------- + col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]] + The adjustments for this sid that occurred after the + split-asof-date. 
+ """ + col_to_split_adjustments = {} + if post_adjustments: + # Get an integer index + requested_qtr_timeline = requested_qtr_data[SHIFTED_NORMALIZED_QTRS][ + sid + ].reset_index() + requested_qtr_timeline = requested_qtr_timeline[ + requested_qtr_timeline[sid].notnull() + ] + + # Split the data into range by quarter and determine which quarter + # was being requested in each range. + # Split integer indexes up by quarter range + qtr_ranges_idxs = np.split( + requested_qtr_timeline.index, + np.where(np.diff(requested_qtr_timeline[sid]) != 0)[0] + 1, + ) + requested_quarters_per_range = [ + requested_qtr_timeline[sid][r[0]] for r in qtr_ranges_idxs + ] + # Try to apply each adjustment to each quarter range. + for i, qtr_range in enumerate(qtr_ranges_idxs): + for adjustment, date_index, timestamp in zip(*post_adjustments): + # In the default case, apply through the end of the quarter + upper_bound = qtr_range[-1] + # Find the smallest KD in estimates that is on or after the + # date of the given adjustment. Apply the given adjustment + # until that KD. + end_idx = self.determine_end_idx_for_adjustment( + timestamp, + requested_qtr_data.index, + upper_bound, + requested_quarters_per_range[i], + sid_estimates, + ) + # In the default case, apply adjustment on the first day of + # the quarter. + start_idx = qtr_range[0] + # If the adjustment happens during this quarter, apply the + # adjustment on the day it happens. + if date_index > start_idx: + start_idx = date_index + # We only want to apply the adjustment if we have any stale + # data to apply it to. 
+                    if qtr_range[0] <= end_idx:
+                        for column_name in requested_split_adjusted_columns:
+                            if column_name not in col_to_split_adjustments:
+                                col_to_split_adjustments[column_name] = {}
+                            adj = Float64Multiply(
+                                # Always apply from first day of qtr
+                                qtr_range[0],
+                                end_idx,
+                                sid_idx,
+                                sid_idx,
+                                adjustment,
+                            )
+                            add_new_adjustments(
+                                col_to_split_adjustments, [adj], column_name, start_idx
+                            )
+
+        return col_to_split_adjustments
+
+    def retrieve_split_adjustment_data_for_sid(
+        self, dates, sid, split_adjusted_asof_idx
+    ):
+        """
+
+        dates : pd.DatetimeIndex
+            The calendar dates.
+        sid : int
+            The sid for which we want to retrieve adjustments.
+        split_adjusted_asof_idx : int
+            The index in `dates` as-of which the data is split adjusted.
+
+        Returns
+        -------
+        pre_adjustments : tuple(list(float), list(int))
+            The adjustment values and indexes in `dates` for
+            adjustments that happened before the split-asof-date.
+        post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
+            The adjustment values, indexes in `dates`, and timestamps for
+            adjustments that happened after the split-asof-date.
+        """
+        adjustments = self._split_adjustments.get_adjustments_for_sid("splits", sid)
+        adjustments = sorted(adjustments, key=lambda adj: adj[0])
+        # Get rid of any adjustments that happen outside of our date index.
+        adjustments = list(filter(lambda x: dates[0] <= x[0] <= dates[-1], adjustments))
+        adjustment_values = np.array([adj[1] for adj in adjustments])
+        timestamps = pd.DatetimeIndex([adj[0] for adj in adjustments])
+        # We need the first date on which we would have known about each
+        # adjustment.
+ date_indexes = dates.searchsorted(timestamps) + pre_adjustment_idxs = np.where(date_indexes <= split_adjusted_asof_idx)[0] + last_adjustment_split_asof_idx = -1 + if len(pre_adjustment_idxs): + last_adjustment_split_asof_idx = pre_adjustment_idxs.max() + pre_adjustments = ( + adjustment_values[: last_adjustment_split_asof_idx + 1], + date_indexes[: last_adjustment_split_asof_idx + 1], + ) + post_adjustments = ( + adjustment_values[last_adjustment_split_asof_idx + 1 :], + date_indexes[last_adjustment_split_asof_idx + 1 :], + timestamps[last_adjustment_split_asof_idx + 1 :], + ) + return pre_adjustments, post_adjustments + + def _collect_adjustments( + self, + requested_qtr_data, + sid, + sid_idx, + sid_estimates, + split_adjusted_asof_idx, + pre_adjustments, + post_adjustments, + requested_split_adjusted_columns, + ): + pre_adjustments_dict = self.collect_pre_split_asof_date_adjustments( + split_adjusted_asof_idx, + sid_idx, + pre_adjustments, + requested_split_adjusted_columns, + ) + + post_adjustments_dict = self.collect_post_asof_split_adjustments( + post_adjustments, + requested_qtr_data, + sid, + sid_idx, + sid_estimates, + requested_split_adjusted_columns, + ) + return pre_adjustments_dict, post_adjustments_dict + + def merge_split_adjustments_with_overwrites( + self, pre, post, overwrites, requested_split_adjusted_columns + ): + """Merge split adjustments with the dict containing overwrites. + + Parameters + ---------- + pre : dict[str -> dict[int -> list]] + The adjustments that occur before the split-adjusted-asof-date. + post : dict[str -> dict[int -> list]] + The adjustments that occur after the split-adjusted-asof-date. + overwrites : dict[str -> dict[int -> list]] + The overwrites across all time. Adjustments will be merged into + this dictionary. + requested_split_adjusted_columns : list of str + List of names of split adjusted columns that are being requested. 
+ """ + for column_name in requested_split_adjusted_columns: + # We can do a merge here because the timestamps in 'pre' and + # 'post' are guaranteed to not overlap. + if pre: + # Either empty or contains all columns. + for ts in pre[column_name]: + add_new_adjustments( + overwrites, pre[column_name][ts], column_name, ts + ) + if post: + # Either empty or contains all columns. + for ts in post[column_name]: + add_new_adjustments( + overwrites, post[column_name][ts], column_name, ts + ) + diff --git a/ziplime/pipeline/loaders/synthetic.py b/ziplime/pipeline/loaders/synthetic.py index 816048f2c9..deb4932134 100644 --- a/ziplime/pipeline/loaders/synthetic.py +++ b/ziplime/pipeline/loaders/synthetic.py @@ -6,8 +6,8 @@ from pandas import DataFrame, Timestamp from sqlite3 import connect as sqlite3_connect -from .base import PipelineLoader -from .frame import DataFrameLoader +from .pipeline_loader import PipelineLoader +from .data_frame_loader import DataFrameLoader from ziplime.utils.numpy_utils import ( @@ -36,153 +36,6 @@ def nanos_to_seconds(nanos): return nanos / (1000 * 1000 * 1000) -class PrecomputedLoader(PipelineLoader): - """Synthetic PipelineLoader that uses a pre-computed array for each column. - - Parameters - ---------- - values : dict - Map from column to values to use for that column. - Values can be anything that can be passed as the first positional - argument to a DataFrame whose indices are ``dates`` and ``sids`` - dates : iterable[datetime-like] - Row labels for input data. Can be anything that pd.DataFrame will - coerce to a DatetimeIndex. - sids : iterable[int-like] - Column labels for input data. Can be anything that pd.DataFrame will - coerce to an Int64Index. - - Notes - ----- - Adjustments are unsupported by this loader. 
- """ - - def __init__(self, constants, dates, sids): - loaders = {} - for column, const in constants.items(): - frame = DataFrame( - const, - index=dates, - columns=sids, - dtype=column.dtype, - ) - loaders[column] = DataFrameLoader( - column=column, - baseline=frame, - adjustments=None, - ) - - self._loaders = loaders - - def load_adjusted_array(self, domain, columns, dates, sids, mask): - """Load by delegating to sub-loaders.""" - out = {} - for col in columns: - try: - loader = self._loaders.get(col) - if loader is None: - loader = self._loaders[col.unspecialize()] - except KeyError as exc: - raise ValueError("Couldn't find loader for %s" % col) from exc - out.update(loader.load_adjusted_array(domain, [col], dates, sids, mask)) - return out - - -class EyeLoader(PrecomputedLoader): - """A PrecomputedLoader that emits arrays containing 1s on the diagonal and 0s - elsewhere. - - Parameters - ---------- - columns : list[BoundColumn] - Columns that this loader should know about. - dates : iterable[datetime-like] - Same as PrecomputedLoader. - sids : iterable[int-like] - Same as PrecomputedLoader - """ - - def __init__(self, columns, dates, sids): - shape = (len(dates), len(sids)) - super(EyeLoader, self).__init__( - {column: np.eye(shape, dtype=column.dtype) for column in columns}, - dates, - sids, - ) - - -class SeededRandomLoader(PrecomputedLoader): - """A PrecomputedLoader that emits arrays randomly-generated with a given seed. - - Parameters - ---------- - seed : int - Seed for numpy.random.RandomState. - columns : list[BoundColumn] - Columns that this loader should know about. - dates : iterable[datetime-like] - Same as PrecomputedLoader. 
- sids : iterable[int-like] - Same as PrecomputedLoader - """ - - def __init__(self, seed, columns, dates, sids): - self._seed = seed - super(SeededRandomLoader, self).__init__( - {c: self.values(c.dtype, dates, sids) for c in columns}, - dates, - sids, - ) - - def values(self, dtype, dates, sids): - """Make a random array of shape (len(dates), len(sids)) with ``dtype``.""" - shape = (len(dates), len(sids)) - return { - datetime64ns_dtype: self._datetime_values, - float64_dtype: self._float_values, - int64_dtype: self._int_values, - bool_dtype: self._bool_values, - object_dtype: self._object_values, - }[dtype](shape) - - @property - def state(self): - """Make a new RandomState from our seed. - - This ensures that every call to _*_values produces the same output - every time for a given SeededRandomLoader instance. - """ - return RandomState(self._seed) - - def _float_values(self, shape): - """Return uniformly-distributed floats between -0.0 and 100.0.""" - return self.state.uniform(low=0.0, high=100.0, size=shape) - - def _int_values(self, shape): - """ - Return uniformly-distributed integers between 0 and 100. 
- """ - return self.state.randint(low=0, high=100, size=shape).astype( - "int64" - ) # default is system int - - def _datetime_values(self, shape): - """Return uniformly-distributed dates in 2014.""" - start = Timestamp("2014", tz="UTC").asm8 - offsets = self.state.randint( - low=0, - high=364, - size=shape, - ).astype("timedelta64[D]") - return start + offsets - - def _bool_values(self, shape): - """Return uniformly-distributed True/False values.""" - return self.state.randn(*shape) < 0 - - def _object_values(self, shape): - res = self._int_values(shape).astype(str).astype(object) - return res OHLCV = ("open", "high", "low", "close", "volume") diff --git a/ziplime/pipeline/loaders/utils.py b/ziplime/pipeline/loaders/utils.py index 771ddd4092..f5955c9962 100644 --- a/ziplime/pipeline/loaders/utils.py +++ b/ziplime/pipeline/loaders/utils.py @@ -1,7 +1,7 @@ import numpy as np import pandas as pd from ziplime.errors import NoFurtherDataError -from ziplime.pipeline.common import TS_FIELD_NAME, SID_FIELD_NAME +from ziplime.pipeline.common import TS_FIELD_NAME, SID_FIELD_NAME, metadata_columns from ziplime.utils.date_utils import make_utc_aware from ziplime.utils.numpy_utils import categorical_dtype @@ -14,7 +14,7 @@ def is_sorted_ascending(a): def validate_event_metadata(event_dates, event_timestamps, event_sids): assert is_sorted_ascending(event_dates), "event dates must be sorted" assert ( - len(event_sids) == len(event_dates) == len(event_timestamps) + len(event_sids) == len(event_dates) == len(event_timestamps) ), "mismatched arrays: %d != %d != %d" % ( len(event_sids), len(event_dates), @@ -23,7 +23,7 @@ def validate_event_metadata(event_dates, event_timestamps, event_sids): def next_event_indexer( - all_dates, data_query_cutoff, all_sids, event_dates, event_timestamps, event_sids + all_dates, data_query_cutoff, all_sids, event_dates, event_timestamps, event_sids ): """ Construct an index array that, when applied to an array of values, produces @@ -81,7 +81,7 @@ 
def next_event_indexer( def previous_event_indexer( - data_query_cutoff_times, all_sids, event_dates, event_timestamps, event_sids + data_query_cutoff_times, all_sids, event_dates, event_timestamps, event_sids ): """ Construct an index array that, when applied to an array of values, produces @@ -136,18 +136,18 @@ def previous_event_indexer( for i in range(len(event_dates) - 1, -1, -1): sid_ix = sid_ixs[i] dt_ix = dt_ixs[i] - out[dt_ix : last_written.get(sid_ix, None), sid_ix] = i + out[dt_ix: last_written.get(sid_ix, None), sid_ix] = i last_written[sid_ix] = dt_ix return out def last_in_date_group( - df, - data_query_cutoff_times, - assets, - reindex=True, - have_sids=True, - extra_groupers=None, + df, + data_query_cutoff_times, + assets, + reindex=True, + have_sids=True, + extra_groupers=None, ): """ Determine the last piece of information known on each date in the date @@ -192,7 +192,7 @@ def last_in_date_group( extra_groupers = [] idx += extra_groupers - to_unstack = idx[-1 : -len(idx) : -1] + to_unstack = idx[-1: -len(idx): -1] last_in_group = ( df.drop(TS_FIELD_NAME, axis=1) .groupby(idx, sort=False) @@ -211,7 +211,7 @@ def last_in_date_group( if have_sids: cols = last_in_group.columns columns = pd.MultiIndex.from_product( - tuple(cols.levels[0 : len(extra_groupers) + 1]) + (assets,), + tuple(cols.levels[0: len(extra_groupers) + 1]) + (assets,), names=cols.names, ) last_in_group = last_in_group.reindex( @@ -343,4 +343,68 @@ def shift_dates(dates, start_date, end_date, shift): else: raise ValueError("Query end %s not in calendar" % end_date) from exc - return dates[start - shift : end - shift + 1] # +1 to be inclusive + return dates[start - shift: end - shift + 1] # +1 to be inclusive + + +def normalize_quarters(years, quarters): + return years * 4 + quarters - 1 + + +def split_normalized_quarters(normalized_quarters): + years = normalized_quarters // 4 + quarters = normalized_quarters % 4 + return years, quarters + 1 + + +def 
required_estimates_fields(columns): + """Compute the set of resource columns required to serve + `columns`. + """ + # We also expect any of the field names that our loadable columns + # are mapped to. + return metadata_columns.union(columns.values()) + + +def validate_column_specs(events, columns): + """Verify that the columns of ``events`` can be used by a + EarningsEstimatesLoader to serve the BoundColumns described by + `columns`. + """ + required = required_estimates_fields(columns) + received = set(events.columns) + missing = required - received + if missing: + raise ValueError( + "EarningsEstimatesLoader missing required columns {missing}.\n" + "Got Columns: {received}\n" + "Expected Columns: {required}".format( + missing=sorted(missing), + received=sorted(received), + required=sorted(required), + ) + ) + + +def add_new_adjustments(adjustments_dict, adjustments, column_name, ts): + try: + adjustments_dict[column_name][ts].extend(adjustments) + except KeyError: + adjustments_dict[column_name][ts] = adjustments + +def validate_split_adjusted_column_specs(name_map, columns): + to_be_split = set(columns) + available = set(name_map.keys()) + extra = to_be_split - available + if extra: + raise ValueError( + "EarningsEstimatesLoader got the following extra columns to be " + "split-adjusted: {extra}.\n" + "Got Columns: {to_be_split}\n" + "Available Columns: {available}".format( + extra=sorted(extra), + to_be_split=sorted(to_be_split), + available=sorted(available), + ) + ) + + diff --git a/ziplime/pipeline/mixins.py b/ziplime/pipeline/mixins.py index ccf823f467..11fbf013d0 100644 --- a/ziplime/pipeline/mixins.py +++ b/ziplime/pipeline/mixins.py @@ -31,7 +31,8 @@ from .downsample_helpers import ( select_sampling_indices, SUPPORTED_DOWNSAMPLE_FREQUENCIES, ) -from .term import Term +from ziplime.pipeline.terms.term import Term + class PositiveWindowLengthMixin(Term): @@ -187,7 +188,9 @@ def _format_inputs(self, windows, column_mask): inputs = [] for input_ in windows: 
window = next(input_) - if window.shape[1] == 1: + # if window.shape[1] == 1: + if len(window.shape) == 1: + # Do not mask single-column inputs. inputs.append(window) else: diff --git a/ziplime/pipeline/pipeline.py b/ziplime/pipeline/pipeline.py index fbe8dda104..8a61539b55 100644 --- a/ziplime/pipeline/pipeline.py +++ b/ziplime/pipeline/pipeline.py @@ -4,8 +4,10 @@ from .domain import Domain, GENERIC, infer_domain from .graph import ExecutionPlan, TermGraph, SCREEN_NAME -from .filters import Filter -from .term import AssetExists, ComputableTerm, Term +from .terms.asset_exists import AssetExists +from .terms.computable_term import ComputableTerm +from .terms.filters import Filter +from .terms.term import Term class Pipeline: diff --git a/ziplime/pipeline/term.py b/ziplime/pipeline/term.py deleted file mode 100644 index cadc70f10b..0000000000 --- a/ziplime/pipeline/term.py +++ /dev/null @@ -1,1007 +0,0 @@ -""" -Base class for Filters, Factors and Classifiers -""" - -from abc import ABC, abstractmethod -from bisect import insort -from collections.abc import Mapping -from weakref import WeakValueDictionary - -from numpy import ( - array, - record, - dtype as dtype_class, - ndarray, -) -from ziplime.errors import ( - DTypeNotSpecified, - InvalidOutputName, - NonSliceableTerm, - NonWindowSafeInput, - NotDType, - NonPipelineInputs, - TermInputsNotSpecified, - TermOutputsEmpty, - UnsupportedDType, - WindowLengthNotSpecified, -) -from ziplime.lib.adjusted_array import can_represent_dtype -from ziplime.lib.labelarray import LabelArray -from ziplime.utils.numpy_utils import ( - bool_dtype, - categorical_dtype, - datetime64ns_dtype, - default_missing_value_for_dtype, - float64_dtype, -) -from ziplime.utils.sharedoc import ( - templated_docstring, - PIPELINE_ALIAS_NAME_DOC, - PIPELINE_DOWNSAMPLING_FREQUENCY_DOC, -) - -from .domain import Domain, GENERIC, infer_domain -from .downsample_helpers import SUPPORTED_DOWNSAMPLE_FREQUENCIES -from ziplime.assets.entities.asset import 
Asset - - -class Term(ABC): - """ - Base class for objects that can appear in the compute graph of a - :class:`ziplime.pipeline.Pipeline`. - - Notes - ----- - Most Pipeline API users only interact with :class:`Term` via subclasses: - - - :class:`~ziplime.pipeline.data.BoundColumn` - - :class:`~ziplime.pipeline.Factor` - - :class:`~ziplime.pipeline.Filter` - - :class:`~ziplime.pipeline.Classifier` - - Instances of :class:`Term` are **memoized**. If you call a Term's - constructor with the same arguments twice, the same object will be returned - from both calls: - - **Example:** - - >>> from ziplime.pipeline.data import EquityPricing - >>> from ziplime.pipeline.factors import SimpleMovingAverage - >>> x = SimpleMovingAverage(inputs=[EquityPricing.close], window_length=5) - >>> y = SimpleMovingAverage(inputs=[EquityPricing.close], window_length=5) - >>> x is y - True - - .. warning:: - - Memoization of terms means that it's generally unsafe to modify - attributes of a term after construction. - """ - - # These are NotSpecified because a subclass is required to provide them. - dtype = None - missing_value = None - - # Subclasses aren't required to provide `params`. The default behavior is - # no params. - params = () - - # All terms are generic by default. - domain = GENERIC - - # Determines if a term is safe to be used as a windowed input. - window_safe = False - - # The dimensions of the term's output (1D or 2D). - ndim = 2 - - _term_cache = WeakValueDictionary() - - def __new__( - cls, - domain=None, - dtype=None, - missing_value=None, - window_safe=None, - ndim=None, - # params is explicitly not allowed to be passed to an instance. - *args, - **kwargs, - ): - """ - Memoized constructor for Terms. - - Caching previously-constructed Terms is useful because it allows us to - only compute equivalent sub-expressions once when traversing a Pipeline - dependency graph. 
- - Caching previously-constructed Terms is **sane** because terms and - their inputs are both conceptually immutable. - """ - # Subclasses can override these class-level attributes to provide - # different default values for instances. - if domain is None: - domain = cls.domain - if dtype is None: - dtype = cls.dtype - if missing_value is None: - missing_value = cls.missing_value - if ndim is None: - ndim = cls.ndim - if window_safe is None: - window_safe = cls.window_safe - - dtype, missing_value = validate_dtype( - cls.__name__, - dtype, - missing_value, - ) - params = cls._pop_params(kwargs) - - identity = cls._static_identity( - domain=domain, - dtype=dtype, - missing_value=missing_value, - window_safe=window_safe, - ndim=ndim, - params=params, - *args, - **kwargs, - ) - - try: - return cls._term_cache[identity] - except KeyError: - new_instance = cls._term_cache[identity] = ( - super(Term, cls) - .__new__(cls) - ._init( - domain=domain, - dtype=dtype, - missing_value=missing_value, - window_safe=window_safe, - ndim=ndim, - params=params, - *args, - **kwargs, - ) - ) - return new_instance - - @classmethod - def _pop_params(cls, kwargs): - """ - Pop entries from the `kwargs` passed to cls.__new__ based on the values - in `cls.params`. - - Parameters - ---------- - kwargs : dict - The kwargs passed to cls.__new__. - - Returns - ------- - params : list[(str, object)] - A list of string, value pairs containing the entries in cls.params. - - Raises - ------ - TypeError - Raised if any parameter values are not passed or not hashable. - """ - params = cls.params - if not isinstance(params, Mapping): - params = {k: None for k in params} - param_values = [] - for key, default_value in params.items(): - try: - value = kwargs.pop(key, default_value) - if value is None: - raise KeyError(key) - - # Check here that the value is hashable so that we fail here - # instead of trying to hash the param values tuple later. 
- hash(value) - except KeyError as exc: - raise TypeError( - "{typename} expected a keyword parameter {name!r}.".format( - typename=cls.__name__, name=key - ) - ) from exc - except TypeError as exc: - # Value wasn't hashable. - raise TypeError( - "{typename} expected a hashable value for parameter " - "{name!r}, but got {value!r} instead.".format( - typename=cls.__name__, - name=key, - value=value, - ) - ) from exc - - param_values.append((key, value)) - return tuple(param_values) - - def __init__(self, *args, **kwargs): - """ - Noop constructor to play nicely with our caching __new__. Subclasses - should implement _init instead of this method. - - When a class' __new__ returns an instance of that class, Python will - automatically call __init__ on the object, even if a new object wasn't - actually constructed. Because we memoize instances, we often return an - object that was already initialized from __new__, in which case we - don't want to call __init__ again. - - Subclasses that need to initialize new instances should override _init, - which is guaranteed to be called only once. - """ - pass - - def __getitem__(self, key: Asset): - if isinstance(self, LoadableTerm): - raise NonSliceableTerm(term=self) - - from .mixins import SliceMixin - - slice_type = type(self)._with_mixin(SliceMixin) - return slice_type(self, key) - - @classmethod - def _static_identity(cls, domain, dtype, missing_value, window_safe, ndim, params): - """ - Return the identity of the Term that would be constructed from the - given arguments. - - Identities that compare equal will cause us to return a cached instance - rather than constructing a new one. We do this primarily because it - makes dependency resolution easier. - - This is a classmethod so that it can be called from Term.__new__ to - determine whether to produce a new instance. 
- """ - return (cls, domain, dtype, missing_value, window_safe, ndim, params) - - def _init(self, domain, dtype, missing_value, window_safe, ndim, params): - """ - Parameters - ---------- - domain : ziplime.pipeline.domain.Domain - The domain of this term. - dtype : np.dtype - Dtype of this term's output. - missing_value : object - Missing value for this term. - ndim : 1 or 2 - The dimensionality of this term. - params : tuple[(str, hashable)] - Tuple of key/value pairs of additional parameters. - """ - self.domain = domain - self.dtype = dtype - self.missing_value = missing_value - self.window_safe = window_safe - self.ndim = ndim - - for name, _ in params: - if hasattr(self, name): - raise TypeError( - "Parameter {name!r} conflicts with already-present" - " attribute with value {value!r}.".format( - name=name, - value=getattr(self, name), - ) - ) - # TODO: Consider setting these values as attributes and replacing - # the boilerplate in NumericalExpression, Rank, and - # PercentileFilter. - - self.params = dict(params) - - # Make sure that subclasses call super() in their _validate() methods - # by setting this flag. The base class implementation of _validate - # should set this flag to True. - self._subclass_called_super_validate = False - self._validate() - assert self._subclass_called_super_validate, ( - "Term._validate() was not called.\n" - "This probably means that you overrode _validate" - " without calling super()." - ) - del self._subclass_called_super_validate - - return self - - def _validate(self): - """ - Assert that this term is well-formed. This should be called exactly - once, at the end of Term._init(). - """ - # mark that we got here to enforce that subclasses overriding _validate - # call super(). - self._subclass_called_super_validate = True - - def compute_extra_rows(self, all_dates, start_date, end_date, min_extra_rows): - """ - Calculate the number of extra rows needed to compute ``self``. 
- - Must return at least ``min_extra_rows``, and the default implementation - is to just return ``min_extra_rows``. This is overridden by - downsampled terms to ensure that the first date computed is a - recomputation date. - - Parameters - ---------- - all_dates : pd.DatetimeIndex - The trading sessions against which ``self`` will be computed. - start_date : pd.Timestamp - The first date for which final output is requested. - end_date : pd.Timestamp - The last date for which final output is requested. - min_extra_rows : int - The minimum number of extra rows required of ``self``, as - determined by other terms that depend on ``self``. - - Returns - ------- - extra_rows : int - The number of extra rows to compute. Must be at least - ``min_extra_rows``. - """ - return min_extra_rows - - @property - @abstractmethod - def inputs(self): - """ - A tuple of other Terms needed as inputs for ``self``. - """ - raise NotImplementedError("inputs") - - @property - @abstractmethod - def windowed(self): - """ - Boolean indicating whether this term is a trailing-window computation. - """ - raise NotImplementedError("windowed") - - @property - @abstractmethod - def mask(self): - """ - A :class:`~ziplime.pipeline.Filter` representing asset/date pairs to - while computing this Term. True means include; False means exclude. - """ - raise NotImplementedError("mask") - - @property - @abstractmethod - def dependencies(self): - """ - A dictionary mapping terms that must be computed before `self` to the - number of extra rows needed for those terms. - """ - raise NotImplementedError("dependencies") - - def graph_repr(self): - """A short repr to use when rendering GraphViz graphs.""" - # Default graph_repr is just the name of the type. - return type(self).__name__ - - def recursive_repr(self): - """A short repr to use when recursively rendering terms with inputs.""" - # Default recursive_repr is just the name of the type. 
- return type(self).__name__ - - -class AssetExists(Term): - """ - Pseudo-filter describing whether or not an asset existed on a given day. - This is the default mask for all terms that haven't been passed a mask - explicitly. - - This is morally a Filter, in the sense that it produces a boolean value for - every asset on every date. We don't subclass Filter, however, because - `AssetExists` is computed directly by the PipelineEngine. - - This term is guaranteed to be available as an input for any term computed - by SimplePipelineEngine.run_pipeline(). - - See Also - -------- - ziplime.assets.AssetFinder.lifetimes - """ - - dtype = bool_dtype - dataset = None - inputs = () - dependencies = {} - mask = None - windowed = False - - def __repr__(self): - return "AssetExists()" - - graph_repr = __repr__ - - def _compute(self, today, assets, out): - raise NotImplementedError( - "AssetExists cannot be computed directly." - " Check your PipelineEngine configuration." - ) - - -class InputDates(Term): - """ - 1-Dimensional term providing date labels for other term inputs. - - This term is guaranteed to be available as an input for any term computed - by SimplePipelineEngine.run_pipeline(). - """ - - ndim = 1 - dataset = None - dtype = datetime64ns_dtype - inputs = () - dependencies = {} - mask = None - windowed = False - window_safe = True - - def __repr__(self): - return "InputDates()" - - graph_repr = __repr__ - - def _compute(self, today, assets, out): - raise NotImplementedError( - "InputDates cannot be computed directly." - " Check your PipelineEngine configuration." - ) - - -class LoadableTerm(Term): - """ - A Term that should be loaded from an external resource by a PipelineLoader. - - This is the base class for :class:`ziplime.pipeline.data.BoundColumn`. - """ - - windowed = False - inputs = () - - @property - def dependencies(self): - return {self.mask: 0} - - -class ComputableTerm(Term): - """ - A Term that should be computed from a tuple of inputs. 
- - This is the base class for :class:`ziplime.pipeline.Factor`, - :class:`ziplime.pipeline.Filter`, and :class:`ziplime.pipeline.Classifier`. - """ - - inputs = None - outputs = None - window_length = None - mask = None - domain = None - - def __new__( - cls, - inputs=inputs, - outputs=outputs, - window_length=window_length, - mask=mask, - domain=domain, - *args, - **kwargs, - ): - - if inputs is None: - inputs = cls.inputs - - # Having inputs = None is an error, but we handle it later - # in self._validate rather than here. - if inputs is not None: - # Allow users to specify lists as class-level defaults, but - # normalize to a tuple so that inputs is hashable. - inputs = tuple(inputs) - - # Make sure all our inputs are valid pipeline objects before trying - # to infer a domain. - non_terms = [t for t in inputs if not isinstance(t, Term)] - if non_terms: - raise NonPipelineInputs(cls.__name__, non_terms) - - if domain is None: - domain = infer_domain(inputs) - - if outputs is None: - outputs = cls.outputs - if outputs is not None: - outputs = tuple(outputs) - - if mask is None: - mask = cls.mask - if mask is None: - mask = AssetExists() - - if window_length is None: - window_length = cls.window_length - - return super(ComputableTerm, cls).__new__( - cls, - inputs=inputs, - outputs=outputs, - mask=mask, - window_length=window_length, - domain=domain, - *args, - **kwargs, - ) - - def _init(self, inputs, outputs, window_length, mask, *args, **kwargs): - self.inputs = inputs - self.outputs = outputs - self.window_length = window_length - self.mask = mask - return super(ComputableTerm, self)._init(*args, **kwargs) - - @classmethod - def _static_identity(cls, inputs, outputs, window_length, mask, *args, **kwargs): - return ( - super(ComputableTerm, cls)._static_identity(*args, **kwargs), - inputs, - outputs, - window_length, - mask, - ) - - def _validate(self): - super(ComputableTerm, self)._validate() - - # Check inputs. 
- if self.inputs is None: - raise TermInputsNotSpecified(termname=type(self).__name__) - - if not isinstance(self.domain, Domain): - raise TypeError( - "Expected {}.domain to be an instance of Domain, " - "but got {}.".format(type(self).__name__, type(self.domain)) - ) - - # Check outputs. - if self.outputs is None: - pass - elif not self.outputs: - raise TermOutputsEmpty(termname=type(self).__name__) - else: - # Raise an exception if there are any naming conflicts between the - # term's output names and certain attributes. - disallowed_names = [ - attr for attr in dir(ComputableTerm) if not attr.startswith("_") - ] - - # The name 'compute' is an added special case that is disallowed. - # Use insort to add it to the list in alphabetical order. - insort(disallowed_names, "compute") - - for output in self.outputs: - if output.startswith("_") or output in disallowed_names: - raise InvalidOutputName( - output_name=output, - termname=type(self).__name__, - disallowed_names=disallowed_names, - ) - - if self.window_length is None: - raise WindowLengthNotSpecified(termname=type(self).__name__) - - if self.mask is None: - # This isn't user error, this is a bug in our code. - raise AssertionError("{term} has no mask".format(term=self)) - - if self.window_length > 1: - for child in self.inputs: - if not child.window_safe: - raise NonWindowSafeInput(parent=self, child=child) - - def _compute(self, inputs, dates, assets, mask): - """ - Subclasses should implement this to perform actual computation. - - This is named ``_compute`` rather than just ``compute`` because - ``compute`` is reserved for user-supplied functions in - CustomFilter/CustomFactor/CustomClassifier. - """ - raise NotImplementedError("_compute") - - # NOTE: This is a method rather than a property because ABCMeta tries to - # access all abstract attributes of its child classes to see if - # they've been implemented. 
These accesses happen during subclass - # creation, before the new subclass has been bound to a name in its - # defining scope. Filter, Factor, and Classifier each implement this - # method to return themselves, but if the method is invoked before - # class definition is finished (which happens if this is a property), - # they fail with a NameError. - @classmethod - @abstractmethod - def _principal_computable_term_type(cls): - """ - Return the "principal" type for a ComputableTerm. - - This returns either Filter, Factor, or Classifier, depending on the - type of ``cls``. It is used to implement behaviors like ``downsample`` - and ``if_then_else`` that are implemented on all ComputableTerms, but - that need to produce different output types depending on the type of - the receiver. - """ - raise NotImplementedError("_principal_computable_term_type") - - @property - def windowed(self): - """ - Whether or not this term represents a trailing window computation. - - If term.windowed is truthy, its compute_from_windows method will be - called with instances of AdjustedArray as inputs. - - If term.windowed is falsey, its compute_from_baseline will be called - with instances of np.ndarray as inputs. - """ - return self.window_length is not None and self.window_length > 0 - - @property - def dependencies(self): - """ - The number of extra rows needed for each of our inputs to compute this - term. - """ - extra_input_rows = max(0, self.window_length - 1) - out = {} - for term in self.inputs: - out[term] = extra_input_rows - out[self.mask] = 0 - return out - - def postprocess(self, data: ndarray): - """ - Called with an result of ``self``, unravelled (i.e. 1-dimensional) - after any user-defined screens have been applied. - - This is mostly useful for transforming the dtype of an output, e.g., to - convert a LabelArray into a pandas Categorical. - - The default implementation is to just return data unchanged. 
- """ - # starting with pandas 1.4, record arrays are no longer supported as DataFrame columns - if isinstance(data[0], record): - return [tuple(r) for r in data] - return data - - def to_workspace_value(self, result, assets): - """ - Called with a column of the result of a pipeline. This needs to put - the data into a format that can be used in a workspace to continue - doing computations. - - Parameters - ---------- - result : pd.Series - A multiindexed series with (dates, assets) whose values are the - results of running this pipeline term over the dates. - assets : pd.Index - All of the assets being requested. This allows us to correctly - shape the workspace value. - - Returns - ------- - workspace_value : array-like - An array like value that the engine can consume. - """ - return ( - result.unstack() - .fillna(self.missing_value) - .reindex(columns=assets, fill_value=self.missing_value) - .values - ) - - @templated_docstring(frequency=PIPELINE_DOWNSAMPLING_FREQUENCY_DOC) - def downsample(self, frequency): - """ - Make a term that computes from ``self`` at lower-than-daily frequency. - - Parameters - ---------- - {frequency} - """ - from .mixins import DownsampledMixin - if frequency not in SUPPORTED_DOWNSAMPLE_FREQUENCIES: - raise ValueError( - "Invalid downsampling frequency: {frequency}.\n\n" - "Valid downsampling frequencies are: {valid_frequencies}".format( - frequency=frequency, - valid_frequencies=", ".join( - sorted(SUPPORTED_DOWNSAMPLE_FREQUENCIES) - ), - ) - ) - downsampled_type = type(self)._with_mixin(DownsampledMixin) - return downsampled_type(term=self, frequency=frequency) - - @templated_docstring(name=PIPELINE_ALIAS_NAME_DOC) - def alias(self, name): - """ - Make a term from ``self`` that names the expression. - - Parameters - ---------- - {name} - - Returns - ------- - aliased : Aliased - ``self`` with a name. - - Notes - ----- - This is useful for giving a name to a numerical or boolean expression. 
- """ - from .mixins import AliasedMixin - - aliased_type = type(self)._with_mixin(AliasedMixin) - return aliased_type(term=self, name=name) - - def isnull(self): - """ - A Filter producing True for values where this Factor has missing data. - - Equivalent to self.isnan() when ``self.dtype`` is float64. - Otherwise equivalent to ``self.eq(self.missing_value)``. - - Returns - ------- - filter : ziplime.pipeline.Filter - """ - if self.dtype == bool_dtype: - raise TypeError("isnull() is not supported for Filters") - - from .filters import NullFilter - - if self.dtype == float64_dtype: - # Using isnan is more efficient when possible because we can fold - # the isnan computation with other NumExpr expressions. - return self.isnan() - else: - return NullFilter(self) - - def notnull(self): - """ - A Filter producing True for values where this Factor has complete data. - - Equivalent to ``~self.isnan()` when ``self.dtype`` is float64. - Otherwise equivalent to ``(self != self.missing_value)``. - - Returns - ------- - filter : ziplime.pipeline.Filter - """ - if self.dtype == bool_dtype: - raise TypeError("notnull() is not supported for Filters") - - from .filters import NotNullFilter - - return NotNullFilter(self) - - def fillna(self, fill_value): - """ - Create a new term that fills missing values of this term's output with - ``fill_value``. - - Parameters - ---------- - fill_value : ziplime.pipeline.ComputableTerm, or object. - Object to use as replacement for missing values. - - If a ComputableTerm (e.g. a Factor) is passed, that term's results - will be used as fill values. - - If a scalar (e.g. a number) is passed, the scalar will be used as a - fill value. 
- - Examples - -------- - - **Filling with a Scalar:** - - Let ``f`` be a Factor which would produce the following output:: - - AAPL MSFT MCD BK - 2017-03-13 1.0 NaN 3.0 4.0 - 2017-03-14 1.5 2.5 NaN NaN - - Then ``f.fillna(0)`` produces the following output:: - - AAPL MSFT MCD BK - 2017-03-13 1.0 0.0 3.0 4.0 - 2017-03-14 1.5 2.5 0.0 0.0 - - **Filling with a Term:** - - Let ``f`` be as above, and let ``g`` be another Factor which would - produce the following output:: - - AAPL MSFT MCD BK - 2017-03-13 10.0 20.0 30.0 40.0 - 2017-03-14 15.0 25.0 35.0 45.0 - - Then, ``f.fillna(g)`` produces the following output:: - - AAPL MSFT MCD BK - 2017-03-13 1.0 20.0 3.0 4.0 - 2017-03-14 1.5 2.5 35.0 45.0 - - Returns - ------- - filled : ziplime.pipeline.ComputableTerm - A term computing the same results as ``self``, but with missing - values filled in using values from ``fill_value``. - """ - if self.dtype == bool_dtype: - raise TypeError("fillna() is not supported for Filters") - - if isinstance(fill_value, LoadableTerm): - raise TypeError( - "Can't use expression {} as a fill value. Did you mean to " - "append '.latest?'".format(fill_value) - ) - elif isinstance(fill_value, ComputableTerm): - if_false = fill_value - else: - # Assume we got a scalar value. Make sure it's compatible with our - # dtype. 
- try: - fill_value = _coerce_to_dtype(fill_value, self.dtype) - except TypeError as exc: - raise TypeError( - "Fill value {value!r} is not a valid choice " - "for term {termname} with dtype {dtype}.\n\n" - "Coercion attempt failed with: {error}".format( - termname=type(self).__name__, - value=fill_value, - dtype=self.dtype, - error=exc, - ) - ) from exc - - if_false = self._constant_type( - const=fill_value, - dtype=self.dtype, - missing_value=self.missing_value, - ) - - return self.notnull().if_else(if_true=self, if_false=if_false) - - # @classlazyval - @property - def _constant_type(cls): - from .mixins import ConstantMixin - - return cls._with_mixin(ConstantMixin) - - #@classlazyval - @property - def _if_else_type(cls): - from .mixins import IfElseMixin - - return cls._with_mixin(IfElseMixin) - - def __repr__(self): - return ("{type}([{inputs}], {window_length})").format( - type=type(self).__name__, - inputs=", ".join(i.recursive_repr() for i in self.inputs), - window_length=self.window_length, - ) - - def recursive_repr(self): - return type(self).__name__ + "(...)" - - @classmethod - def _with_mixin(cls, mixin_type): - return mixin_type.universal_mixin_specialization( - cls._principal_computable_term_type(), - ) - - -def validate_dtype(termname, dtype, missing_value): - """ - Validate a `dtype` and `missing_value` passed to Term.__new__. - - Ensures that we know how to represent ``dtype``, and that missing_value - is specified for types without default missing values. - - Returns - ------- - validated_dtype, validated_missing_value : np.dtype, any - The dtype and missing_value to use for the new term. - - Raises - ------ - DTypeNotSpecified - When no dtype was passed to the instance, and the class doesn't - provide a default. - NotDType - When either the class or the instance provides a value not - coercible to a numpy dtype. - NoDefaultMissingValue - When dtype requires an explicit missing_value, but - ``missing_value`` is None. 
- """ - if dtype is None: - raise DTypeNotSpecified(termname=termname) - - try: - dtype = dtype_class(dtype) - except TypeError as exc: - raise NotDType(dtype=dtype, termname=termname) from exc - - if not can_represent_dtype(dtype): - raise UnsupportedDType(dtype=dtype, termname=termname) - - if missing_value is None: - missing_value = default_missing_value_for_dtype(dtype) - - try: - _coerce_to_dtype(missing_value, dtype) - except TypeError as exc: - raise TypeError( - "Missing value {value!r} is not a valid choice " - "for term {termname} with dtype {dtype}.\n\n" - "Coercion attempt failed with: {error}".format( - termname=termname, - value=missing_value, - dtype=dtype, - error=exc, - ) - ) from exc - - return dtype, missing_value - - -def _assert_valid_categorical_missing_value(value): - """ - Check that value is a valid categorical missing_value. - - Raises a TypeError if the value is cannot be used as the missing_value for - a categorical_dtype Term. - """ - label_types = LabelArray.SUPPORTED_SCALAR_TYPES - if not isinstance(value, label_types): - raise TypeError( - "String-dtype classifiers can only produce {types}.".format( - types=" or ".join([t.__name__ for t in label_types]) - ) - ) - - -def _coerce_to_dtype(value, dtype): - if dtype == categorical_dtype: - # This check is necessary because we use object dtype for - # categoricals, and numpy will allow us to promote numerical - # values to object even though we don't support them. - _assert_valid_categorical_missing_value(value) - return value - else: - # For any other type, cast using the same rules as numpy's astype - # function with casting='same_kind'. - # - # 'same_kind' allows casting between things like float32 and float64, - # but not between str and int. Note that the name is somewhat - # misleading, since it does allow conversion between different dtype - # kinds in some cases. In particular, conversion from int to float is - # allowed. 
- return array([value]).astype(dtype=dtype, casting="same_kind")[0] diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/__init__.py b/ziplime/pipeline/terms/__init__.py similarity index 100% rename from ziplime/data/data_sources/grpc/grpc_stubs/__init__.py rename to ziplime/pipeline/terms/__init__.py diff --git a/ziplime/pipeline/terms/asset_exists.py b/ziplime/pipeline/terms/asset_exists.py new file mode 100644 index 0000000000..4010decbe5 --- /dev/null +++ b/ziplime/pipeline/terms/asset_exists.py @@ -0,0 +1,39 @@ +from ziplime.pipeline.terms.term import Term +from ziplime.utils.numpy_utils import bool_dtype + + +class AssetExists(Term): + """ + Pseudo-filter describing whether or not an asset existed on a given day. + This is the default mask for all terms that haven't been passed a mask + explicitly. + + This is morally a Filter, in the sense that it produces a boolean value for + every asset on every date. We don't subclass Filter, however, because + `AssetExists` is computed directly by the PipelineEngine. + + This term is guaranteed to be available as an input for any term computed + by SimplePipelineEngine.run_pipeline(). + + See Also + -------- + ziplime.assets.AssetFinder.lifetimes + """ + + dtype = bool_dtype + dataset = None + inputs = () + dependencies = {} + mask = None + windowed = False + + def __repr__(self): + return "AssetExists()" + + graph_repr = __repr__ + + def _compute(self, today, assets, out): + raise NotImplementedError( + "AssetExists cannot be computed directly." + " Check your PipelineEngine configuration." 
+ ) \ No newline at end of file diff --git a/ziplime/pipeline/terms/bound_column.py b/ziplime/pipeline/terms/bound_column.py new file mode 100644 index 0000000000..0b29a26d7e --- /dev/null +++ b/ziplime/pipeline/terms/bound_column.py @@ -0,0 +1,273 @@ + +from toolz import first +from ziplime.currency import Currency +from ziplime.data.fx import DEFAULT_FX_RATE +from ziplime.pipeline.terms.asset_exists import AssetExists + +from ziplime.pipeline.terms.classifiers import Classifier, Latest as LatestClassifier +from ziplime.pipeline.domain import GENERIC +from ziplime.pipeline.terms.loadable_term import LoadableTerm +from ziplime.utils.numpy_utils import float64_dtype + + +class BoundColumn(LoadableTerm): + """ + A column of data that's been concretely bound to a particular dataset. + + Attributes + ---------- + dtype : numpy.dtype + The dtype of data produced when this column is loaded. + latest : ziplime.pipeline.LoadableTerm + A :class:`~ziplime.pipeline.Filter`, :class:`~ziplime.pipeline.Factor`, + or :class:`~ziplime.pipeline.Classifier` computing the most recently + known value of this column on each date. + See :class:`ziplime.pipeline.mixins.LatestMixin` for more details. + dataset : ziplime.pipeline.data.DataSet + The dataset to which this column is bound. + name : str + The name of this column. + metadata : dict + Extra metadata associated with this column. + currency_aware : bool + Whether or not this column produces currency-denominated data. + + Notes + ----- + Instances of this class are dynamically created upon access to attributes + of :class:`~ziplime.pipeline.data.DataSet`. For example, + :attr:`~ziplime.pipeline.data.EquityPricing.close` is an instance of this + class. Pipeline API users should never construct instances of this + directly. 
+ """ + + mask = AssetExists() + window_safe = True + + def __new__( + cls, + dtype, + missing_value, + dataset, + name, + doc, + metadata, + currency_conversion, + currency_aware, + ): + if currency_aware and dtype != float64_dtype: + raise AssertionError( + "The {} column on dataset {} cannot be constructed with " + "currency_aware={}, dtype={}. Currency aware columns must " + "have a float64 dtype.".format( + name, + dataset, + currency_aware, + dtype, + ) + ) + + return super(BoundColumn, cls).__new__( + cls, + domain=dataset.domain, + dtype=dtype, + missing_value=missing_value, + dataset=dataset, + name=name, + ndim=dataset.ndim, + doc=doc, + metadata=metadata, + currency_conversion=currency_conversion, + currency_aware=currency_aware, + ) + + def _init( + self, + dataset, + name, + doc, + metadata, + currency_conversion, + currency_aware, + *args, + **kwargs, + ): + self._dataset = dataset + self._name = name + self.__doc__ = doc + self._metadata = metadata + self._currency_conversion = currency_conversion + self._currency_aware = currency_aware + return super(BoundColumn, self)._init(*args, **kwargs) + + @classmethod + def _static_identity( + cls, + dataset, + name, + doc, + metadata, + currency_conversion, + currency_aware, + *args, + **kwargs, + ): + return ( + super(BoundColumn, cls)._static_identity(*args, **kwargs), + dataset, + name, + doc, + frozenset(sorted(metadata.items(), key=first)), + currency_conversion, + currency_aware, + ) + + def __lt__(self, other): + msg = "Can't compare '{}' with '{}'. 
(Did you mean to use '.latest'?)" + raise TypeError(msg.format(self.qualname, other.__class__.__name__)) + + __gt__ = __le__ = __ge__ = __lt__ + + def _replace(self, **kwargs): + kw = dict( + dtype=self.dtype, + missing_value=self.missing_value, + dataset=self._dataset, + name=self._name, + doc=self.__doc__, + metadata=self._metadata, + currency_conversion=self._currency_conversion, + currency_aware=self._currency_aware, + ) + kw.update(kwargs) + + return type(self)(**kw) + + def specialize(self, domain): + """Specialize ``self`` to a concrete domain.""" + if domain == self.domain: + return self + + return self._replace(dataset=self._dataset.specialize(domain)) + + def unspecialize(self): + """ + Unspecialize a column to its generic form. + + This is equivalent to ``column.specialize(GENERIC)``. + """ + return self.specialize(GENERIC) + + def fx(self, currency: str | Currency): + """ + Construct a currency-converted version of this column. + + Parameters + ---------- + currency : str or ziplime.currency.Currency + Currency into which to convert this column's data. + + Returns + ------- + column : BoundColumn + Column producing the same data as ``self``, but currency-converted + into ``currency``. + """ + from ziplime.pipeline.data.dataset import CurrencyConversion + + conversion = self._currency_conversion + + if not self._currency_aware: + raise TypeError( + "The .fx() method cannot be called on {} because it does not " + "produce currency-denominated data.".format(self.qualname) + ) + elif conversion is not None and conversion.currency == currency: + return self + + return self._replace( + currency_conversion=CurrencyConversion( + currency=currency, + field=DEFAULT_FX_RATE, + ) + ) + + @property + def currency_conversion(self): + """Specification for currency conversions applied for this term.""" + return self._currency_conversion + + @property + def currency_aware(self): + """ + Whether or not this column produces currency-denominated data. 
+ """ + return self._currency_aware + + @property + def dataset(self): + """ + The dataset to which this column is bound. + """ + return self._dataset + + @property + def name(self): + """ + The name of this column. + """ + return self._name + + @property + def metadata(self): + """ + A copy of the metadata for this column. + """ + return self._metadata.copy() + + @property + def qualname(self): + """The fully-qualified name of this column.""" + out = ".".join([self.dataset.qualname, self.name]) + conversion = self._currency_conversion + if conversion is not None: + out += ".fx({!r})".format(conversion.currency.code) + return out + + @property + def latest(self): + from ziplime.pipeline.terms.factors import Factor, Latest as LatestFactor + from ziplime.pipeline.terms.filters import Filter, Latest as LatestFilter + + dtype = self.dtype + if dtype in Filter.ALLOWED_DTYPES: + Latest = LatestFilter + elif dtype in Classifier.ALLOWED_DTYPES: + Latest = LatestClassifier + else: + assert dtype in Factor.ALLOWED_DTYPES, "Unknown dtype %s." 
% dtype + Latest = LatestFactor + + return Latest( + inputs=(self,), + dtype=dtype, + missing_value=self.missing_value, + ndim=self.ndim, + ) + + def __repr__(self): + return "{qualname}::{dtype}".format( + qualname=self.qualname, + dtype=self.dtype.name, + ) + + def graph_repr(self): + """Short repr to use when rendering Pipeline graphs.""" + # Graphviz interprets `\l` as "divide label into lines, left-justified" + return "BoundColumn:\\l Dataset: {}\\l Column: {}\\l".format( + self.dataset.__name__, self.name + ) + + def recursive_repr(self): + """Short repr used to render in recursive contexts.""" + return self.qualname diff --git a/ziplime/pipeline/terms/classifiers/__init__.py b/ziplime/pipeline/terms/classifiers/__init__.py new file mode 100644 index 0000000000..a978ea8050 --- /dev/null +++ b/ziplime/pipeline/terms/classifiers/__init__.py @@ -0,0 +1,13 @@ +from .classifier import Classifier +from .custom_classifier import CustomClassifier +from .everything import Everything +from .latest import Latest +from .quantiles import Quantiles + +__all__ = [ + "Classifier", + "CustomClassifier", + "Everything", + "Latest", + "Quantiles", +] diff --git a/ziplime/pipeline/classifiers/classifier.py b/ziplime/pipeline/terms/classifiers/classifier.py similarity index 72% rename from ziplime/pipeline/classifiers/classifier.py rename to ziplime/pipeline/terms/classifiers/classifier.py index 88843e11ae..29254e7732 100644 --- a/ziplime/pipeline/classifiers/classifier.py +++ b/ziplime/pipeline/terms/classifiers/classifier.py @@ -7,18 +7,13 @@ import operator import re -from numpy import where, isnan, nan, zeros import pandas as pd -from ziplime.errors import UnsupportedDataType from ziplime.lib.labelarray import LabelArray -from ziplime.lib.quantiles import quantiles from ziplime.pipeline.dtypes import ( CLASSIFIER_DTYPES, - FACTOR_DTYPES, - FILTER_DTYPES, ) -from ziplime.pipeline.term import ComputableTerm +from ziplime.pipeline.terms.computable_term import ComputableTerm from 
ziplime.utils.compat import unicode from ziplime.utils.numpy_utils import ( categorical_dtype, @@ -26,24 +21,11 @@ vectorized_is_element, ) -from ..filters import ArrayPredicate, NumExprFilter -from ..mixins import ( - CustomTermMixin, - LatestMixin, - PositiveWindowLengthMixin, +from ziplime.pipeline.terms.filters import ArrayPredicate, NumExprFilter +from ziplime.pipeline.mixins import ( RestrictedDTypeMixin, - SingleInputMixin, - StandardOutputs, ) -# string_classifiers_only = restrict_to_dtype( -# dtype=categorical_dtype, -# message_template=( -# "{method_name}() is only defined on Classifiers producing strings" -# " but it was called on a Classifier of dtype {received_dtype}." -# ), -# ) - class Classifier(RestrictedDTypeMixin, ComputableTerm): """ @@ -415,158 +397,3 @@ def peer_count(self, mask=None): return PeerCount(inputs=[self], mask=mask) - -class Everything(Classifier): - """ - A trivial classifier that classifies everything the same. - """ - - dtype = int64_dtype - window_length = 0 - inputs = () - missing_value = -1 - - def _compute(self, arrays, dates, assets, mask): - return where( - mask, - zeros(shape=mask.shape, dtype=int64_dtype), - self.missing_value, - ) - - -class Quantiles(SingleInputMixin, Classifier): - """ - A classifier computing quantiles over an input. - """ - - params = ("bins",) - dtype = int64_dtype - window_length = 0 - missing_value = -1 - - def _compute(self, arrays, dates, assets, mask): - data = arrays[0] - bins = self.params["bins"] - to_bin = where(mask, data, nan) - result = quantiles(to_bin, bins) - # Write self.missing_value into nan locations, whether they were - # generated by our input mask or not. 
- result[isnan(result)] = self.missing_value - return result.astype(int64_dtype) - - def graph_repr(self): - """Short repr to use when rendering Pipeline graphs.""" - return type(self).__name__ + "(%d)" % self.params["bins"] - - -class Relabel(SingleInputMixin, Classifier): - """ - A classifier applying a relabeling function on the result of another - classifier. - - Parameters - ---------- - arg : ziplime.pipeline.Classifier - Term produceing the input to be relabeled. - relabel_func : function(LabelArray) -> LabelArray - Function to apply to the result of `term`. - """ - - window_length = 0 - params = ("relabeler",) - - # TODO: Support relabeling for integer dtypes. - def __new__(cls, term: categorical_dtype, relabeler): - return super(Relabel, cls).__new__( - cls, - inputs=(term,), - dtype=term.dtype, - mask=term.mask, - relabeler=relabeler, - ) - - def _compute(self, arrays, dates, assets, mask): - relabeler = self.params["relabeler"] - data = arrays[0] - - if isinstance(data, LabelArray): - result = data.map(relabeler) - result[~mask] = data.missing_value - else: - raise NotImplementedError( - "Relabeling is not currently supported for " "int-dtype classifiers." - ) - return result - - -class CustomClassifier( - PositiveWindowLengthMixin, StandardOutputs, CustomTermMixin, Classifier -): - """ - Base class for user-defined Classifiers. - - Does not suppport multiple outputs. 
- - See Also - -------- - ziplime.pipeline.CustomFactor - ziplime.pipeline.CustomFilter - """ - - def _validate(self): - try: - super(CustomClassifier, self)._validate() - except UnsupportedDataType as exc: - if self.dtype in FACTOR_DTYPES: - raise UnsupportedDataType( - typename=type(self).__name__, - dtype=self.dtype, - hint="Did you mean to create a CustomFactor?", - ) from exc - elif self.dtype in FILTER_DTYPES: - raise UnsupportedDataType( - typename=type(self).__name__, - dtype=self.dtype, - hint="Did you mean to create a CustomFilter?", - ) from exc - raise - - def _allocate_output(self, windows, shape): - """ - Override the default array allocation to produce a LabelArray when we - have a string-like dtype. - """ - if self.dtype == int64_dtype: - return super(CustomClassifier, self)._allocate_output( - windows, - shape, - ) - - # This is a little bit of a hack. We might not know what the - # categories for a LabelArray are until it's actually been loaded, so - # we need to look at the underlying data. - return windows[0].data.empty_like(shape) - - -class Latest(LatestMixin, CustomClassifier): - """ - A classifier producing the latest value of an input. 
- - See Also - -------- - ziplime.pipeline.data.dataset.BoundColumn.latest - """ - - pass - - -class InvalidClassifierComparison(TypeError): - def __init__(self, classifier, compval): - super(InvalidClassifierComparison, self).__init__( - "Can't compare classifier of dtype" - " {dtype} to value {value} of type {type}.".format( - dtype=classifier.dtype, - value=compval, - type=type(compval).__name__, - ) - ) diff --git a/ziplime/pipeline/terms/classifiers/custom_classifier.py b/ziplime/pipeline/terms/classifiers/custom_classifier.py new file mode 100644 index 0000000000..445b9a5e78 --- /dev/null +++ b/ziplime/pipeline/terms/classifiers/custom_classifier.py @@ -0,0 +1,64 @@ +from ziplime.errors import UnsupportedDataType +from ziplime.pipeline.dtypes import ( + FACTOR_DTYPES, + FILTER_DTYPES, +) +from ziplime.pipeline.terms.classifiers.classifier import Classifier +from ziplime.utils.numpy_utils import ( + int64_dtype, +) + +from ziplime.pipeline.mixins import ( + CustomTermMixin, + PositiveWindowLengthMixin, + StandardOutputs, +) + + +class CustomClassifier( + PositiveWindowLengthMixin, StandardOutputs, CustomTermMixin, Classifier +): + """ + Base class for user-defined Classifiers. + + Does not suppport multiple outputs. + + See Also + -------- + ziplime.pipeline.CustomFactor + ziplime.pipeline.CustomFilter + """ + + def _validate(self): + try: + super(CustomClassifier, self)._validate() + except UnsupportedDataType as exc: + if self.dtype in FACTOR_DTYPES: + raise UnsupportedDataType( + typename=type(self).__name__, + dtype=self.dtype, + hint="Did you mean to create a CustomFactor?", + ) from exc + elif self.dtype in FILTER_DTYPES: + raise UnsupportedDataType( + typename=type(self).__name__, + dtype=self.dtype, + hint="Did you mean to create a CustomFilter?", + ) from exc + raise + + def _allocate_output(self, windows, shape): + """ + Override the default array allocation to produce a LabelArray when we + have a string-like dtype. 
+ """ + if self.dtype == int64_dtype: + return super(CustomClassifier, self)._allocate_output( + windows, + shape, + ) + + # This is a little bit of a hack. We might not know what the + # categories for a LabelArray are until it's actually been loaded, so + # we need to look at the underlying data. + return windows[0].data.empty_like(shape) diff --git a/ziplime/pipeline/terms/classifiers/everything.py b/ziplime/pipeline/terms/classifiers/everything.py new file mode 100644 index 0000000000..fdf1a36ab9 --- /dev/null +++ b/ziplime/pipeline/terms/classifiers/everything.py @@ -0,0 +1,25 @@ +from numpy import where, zeros + +from ziplime.pipeline.terms.classifiers.classifier import Classifier +from ziplime.utils.numpy_utils import ( + int64_dtype, +) + + + +class Everything(Classifier): + """ + A trivial classifier that classifies everything the same. + """ + + dtype = int64_dtype + window_length = 0 + inputs = () + missing_value = -1 + + def _compute(self, arrays, dates, assets, mask): + return where( + mask, + zeros(shape=mask.shape, dtype=int64_dtype), + self.missing_value, + ) diff --git a/ziplime/pipeline/terms/classifiers/latest.py b/ziplime/pipeline/terms/classifiers/latest.py new file mode 100644 index 0000000000..fdee403ced --- /dev/null +++ b/ziplime/pipeline/terms/classifiers/latest.py @@ -0,0 +1,14 @@ +from ziplime.pipeline.mixins import LatestMixin +from ziplime.pipeline.terms.classifiers.custom_classifier import CustomClassifier + + +class Latest(LatestMixin, CustomClassifier): + """ + A classifier producing the latest value of an input. 
+ + See Also + -------- + ziplime.pipeline.data.dataset.BoundColumn.latest + """ + + pass diff --git a/ziplime/pipeline/terms/classifiers/quantiles.py b/ziplime/pipeline/terms/classifiers/quantiles.py new file mode 100644 index 0000000000..17d9706951 --- /dev/null +++ b/ziplime/pipeline/terms/classifiers/quantiles.py @@ -0,0 +1,36 @@ +from numpy import where, isnan, nan +from ziplime.lib.quantiles import quantiles +from ziplime.pipeline.terms.classifiers.classifier import Classifier +from ziplime.utils.numpy_utils import ( + int64_dtype, +) + +from ziplime.pipeline.mixins import ( + SingleInputMixin, +) + + +class Quantiles(SingleInputMixin, Classifier): + """ + A classifier computing quantiles over an input. + """ + + params = ("bins",) + dtype = int64_dtype + window_length = 0 + missing_value = -1 + + def _compute(self, arrays, dates, assets, mask): + data = arrays[0] + bins = self.params["bins"] + to_bin = where(mask, data, nan) + result = quantiles(to_bin, bins) + # Write self.missing_value into nan locations, whether they were + # generated by our input mask or not. + result[isnan(result)] = self.missing_value + return result.astype(int64_dtype) + + def graph_repr(self): + """Short repr to use when rendering Pipeline graphs.""" + return type(self).__name__ + "(%d)" % self.params["bins"] + diff --git a/ziplime/pipeline/terms/classifiers/relabel.py b/ziplime/pipeline/terms/classifiers/relabel.py new file mode 100644 index 0000000000..5d382cb534 --- /dev/null +++ b/ziplime/pipeline/terms/classifiers/relabel.py @@ -0,0 +1,49 @@ +from ziplime.lib.labelarray import LabelArray +from ziplime.pipeline.terms.classifiers import Classifier +from ziplime.utils.numpy_utils import ( + categorical_dtype, +) + +from ziplime.pipeline.mixins import ( + SingleInputMixin, +) + + +class Relabel(SingleInputMixin, Classifier): + """ + A classifier applying a relabeling function on the result of another + classifier. 
+ + Parameters + ---------- + arg : ziplime.pipeline.Classifier + Term produceing the input to be relabeled. + relabel_func : function(LabelArray) -> LabelArray + Function to apply to the result of `term`. + """ + + window_length = 0 + params = ("relabeler",) + + # TODO: Support relabeling for integer dtypes. + def __new__(cls, term: categorical_dtype, relabeler): + return super(Relabel, cls).__new__( + cls, + inputs=(term,), + dtype=term.dtype, + mask=term.mask, + relabeler=relabeler, + ) + + def _compute(self, arrays, dates, assets, mask): + relabeler = self.params["relabeler"] + data = arrays[0] + + if isinstance(data, LabelArray): + result = data.map(relabeler) + result[~mask] = data.missing_value + else: + raise NotImplementedError( + "Relabeling is not currently supported for " "int-dtype classifiers." + ) + return result diff --git a/ziplime/pipeline/terms/computable_term.py b/ziplime/pipeline/terms/computable_term.py new file mode 100644 index 0000000000..6bb69f7d86 --- /dev/null +++ b/ziplime/pipeline/terms/computable_term.py @@ -0,0 +1,472 @@ +from abc import abstractmethod +from bisect import insort + +from numpy import ( + record, + ndarray, +) +from ziplime.errors import ( + InvalidOutputName, + NonWindowSafeInput, + NonPipelineInputs, + TermInputsNotSpecified, + TermOutputsEmpty, + WindowLengthNotSpecified, +) +from ziplime.pipeline.terms.term import Term +from ziplime.pipeline.domain import Domain +from ziplime.pipeline.domain import infer_domain +from ziplime.pipeline.downsample_helpers import SUPPORTED_DOWNSAMPLE_FREQUENCIES +from ziplime.pipeline.terms.asset_exists import AssetExists +from ziplime.pipeline.terms.loadable_term import LoadableTerm +from ziplime.pipeline.terms.utils import _coerce_to_dtype +from ziplime.utils.numpy_utils import ( + bool_dtype, + float64_dtype, +) +from ziplime.utils.sharedoc import ( + templated_docstring, + PIPELINE_ALIAS_NAME_DOC, + PIPELINE_DOWNSAMPLING_FREQUENCY_DOC, +) + + +class ComputableTerm(Term): + """ + A 
Term that should be computed from a tuple of inputs. + + This is the base class for :class:`ziplime.pipeline.Factor`, + :class:`ziplime.pipeline.Filter`, and :class:`ziplime.pipeline.Classifier`. + """ + + inputs = None + outputs = None + window_length = None + mask = None + domain = None + + def __new__( + cls, + inputs=inputs, + outputs=outputs, + window_length=window_length, + mask=mask, + domain=domain, + *args, + **kwargs, + ): + + if inputs is None: + inputs = cls.inputs + + # Having inputs = None is an error, but we handle it later + # in self._validate rather than here. + if inputs is not None: + # Allow users to specify lists as class-level defaults, but + # normalize to a tuple so that inputs is hashable. + inputs = tuple(inputs) + + # Make sure all our inputs are valid pipeline objects before trying + # to infer a domain. + non_terms = [t for t in inputs if not isinstance(t, Term)] + if non_terms: + raise NonPipelineInputs(cls.__name__, non_terms) + + if domain is None: + domain = infer_domain(inputs) + + if outputs is None: + outputs = cls.outputs + if outputs is not None: + outputs = tuple(outputs) + + if mask is None: + mask = cls.mask + if mask is None: + mask = AssetExists() + + if window_length is None: + window_length = cls.window_length + + return super(ComputableTerm, cls).__new__( + cls, + inputs=inputs, + outputs=outputs, + mask=mask, + window_length=window_length, + domain=domain, + *args, + **kwargs, + ) + + def _init(self, inputs, outputs, window_length, mask, *args, **kwargs): + self.inputs = inputs + self.outputs = outputs + self.window_length = window_length + self.mask = mask + return super(ComputableTerm, self)._init(*args, **kwargs) + + @classmethod + def _static_identity(cls, inputs, outputs, window_length, mask, *args, **kwargs): + return ( + super(ComputableTerm, cls)._static_identity(*args, **kwargs), + inputs, + outputs, + window_length, + mask, + ) + + def _validate(self): + super(ComputableTerm, self)._validate() + + # Check 
inputs. + if self.inputs is None: + raise TermInputsNotSpecified(termname=type(self).__name__) + + if not isinstance(self.domain, Domain): + raise TypeError( + "Expected {}.domain to be an instance of Domain, " + "but got {}.".format(type(self).__name__, type(self.domain)) + ) + + # Check outputs. + if self.outputs is None: + pass + elif not self.outputs: + raise TermOutputsEmpty(termname=type(self).__name__) + else: + # Raise an exception if there are any naming conflicts between the + # term's output names and certain attributes. + disallowed_names = [ + attr for attr in dir(ComputableTerm) if not attr.startswith("_") + ] + + # The name 'compute' is an added special case that is disallowed. + # Use insort to add it to the list in alphabetical order. + insort(disallowed_names, "compute") + + for output in self.outputs: + if output.startswith("_") or output in disallowed_names: + raise InvalidOutputName( + output_name=output, + termname=type(self).__name__, + disallowed_names=disallowed_names, + ) + + if self.window_length is None: + raise WindowLengthNotSpecified(termname=type(self).__name__) + + if self.mask is None: + # This isn't user error, this is a bug in our code. + raise AssertionError("{term} has no mask".format(term=self)) + + if self.window_length > 1: + for child in self.inputs: + if not child.window_safe: + raise NonWindowSafeInput(parent=self, child=child) + + def _compute(self, inputs, dates, assets, mask): + """ + Subclasses should implement this to perform actual computation. + + This is named ``_compute`` rather than just ``compute`` because + ``compute`` is reserved for user-supplied functions in + CustomFilter/CustomFactor/CustomClassifier. + """ + raise NotImplementedError("_compute") + + # NOTE: This is a method rather than a property because ABCMeta tries to + # access all abstract attributes of its child classes to see if + # they've been implemented. 
These accesses happen during subclass + # creation, before the new subclass has been bound to a name in its + # defining scope. Filter, Factor, and Classifier each implement this + # method to return themselves, but if the method is invoked before + # class definition is finished (which happens if this is a property), + # they fail with a NameError. + @classmethod + @abstractmethod + def _principal_computable_term_type(cls): + """ + Return the "principal" type for a ComputableTerm. + + This returns either Filter, Factor, or Classifier, depending on the + type of ``cls``. It is used to implement behaviors like ``downsample`` + and ``if_then_else`` that are implemented on all ComputableTerms, but + that need to produce different output types depending on the type of + the receiver. + """ + raise NotImplementedError("_principal_computable_term_type") + + @property + def windowed(self): + """ + Whether or not this term represents a trailing window computation. + + If term.windowed is truthy, its compute_from_windows method will be + called with instances of AdjustedArray as inputs. + + If term.windowed is falsey, its compute_from_baseline will be called + with instances of np.ndarray as inputs. + """ + return self.window_length is not None and self.window_length > 0 + + @property + def dependencies(self): + """ + The number of extra rows needed for each of our inputs to compute this + term. + """ + extra_input_rows = max(0, self.window_length - 1) + out = {} + for term in self.inputs: + out[term] = extra_input_rows + out[self.mask] = 0 + return out + + def postprocess(self, data: ndarray): + """ + Called with an result of ``self``, unravelled (i.e. 1-dimensional) + after any user-defined screens have been applied. + + This is mostly useful for transforming the dtype of an output, e.g., to + convert a LabelArray into a pandas Categorical. + + The default implementation is to just return data unchanged. 
+ """ + # starting with pandas 1.4, record arrays are no longer supported as DataFrame columns + if isinstance(data[0], record): + return [tuple(r) for r in data] + return data + + def to_workspace_value(self, result, assets): + """ + Called with a column of the result of a pipeline. This needs to put + the data into a format that can be used in a workspace to continue + doing computations. + + Parameters + ---------- + result : pd.Series + A multiindexed series with (dates, assets) whose values are the + results of running this pipeline term over the dates. + assets : pd.Index + All of the assets being requested. This allows us to correctly + shape the workspace value. + + Returns + ------- + workspace_value : array-like + An array like value that the engine can consume. + """ + return ( + result.unstack() + .fillna(self.missing_value) + .reindex(columns=assets, fill_value=self.missing_value) + .values + ) + + @templated_docstring(frequency=PIPELINE_DOWNSAMPLING_FREQUENCY_DOC) + def downsample(self, frequency): + """ + Make a term that computes from ``self`` at lower-than-daily frequency. + + Parameters + ---------- + {frequency} + """ + from ..mixins import DownsampledMixin + if frequency not in SUPPORTED_DOWNSAMPLE_FREQUENCIES: + raise ValueError( + "Invalid downsampling frequency: {frequency}.\n\n" + "Valid downsampling frequencies are: {valid_frequencies}".format( + frequency=frequency, + valid_frequencies=", ".join( + sorted(SUPPORTED_DOWNSAMPLE_FREQUENCIES) + ), + ) + ) + downsampled_type = type(self)._with_mixin(DownsampledMixin) + return downsampled_type(term=self, frequency=frequency) + + @templated_docstring(name=PIPELINE_ALIAS_NAME_DOC) + def alias(self, name): + """ + Make a term from ``self`` that names the expression. + + Parameters + ---------- + {name} + + Returns + ------- + aliased : Aliased + ``self`` with a name. + + Notes + ----- + This is useful for giving a name to a numerical or boolean expression. 
+ """ + from ..mixins import AliasedMixin + + aliased_type = type(self)._with_mixin(AliasedMixin) + return aliased_type(term=self, name=name) + + def isnull(self): + """ + A Filter producing True for values where this Factor has missing data. + + Equivalent to self.isnan() when ``self.dtype`` is float64. + Otherwise equivalent to ``self.eq(self.missing_value)``. + + Returns + ------- + filter : ziplime.pipeline.Filter + """ + if self.dtype == bool_dtype: + raise TypeError("isnull() is not supported for Filters") + + from .filters import NullFilter + + if self.dtype == float64_dtype: + # Using isnan is more efficient when possible because we can fold + # the isnan computation with other NumExpr expressions. + return self.isnan() + else: + return NullFilter(self) + + def notnull(self): + """ + A Filter producing True for values where this Factor has complete data. + + Equivalent to ``~self.isnan()` when ``self.dtype`` is float64. + Otherwise equivalent to ``(self != self.missing_value)``. + + Returns + ------- + filter : ziplime.pipeline.Filter + """ + if self.dtype == bool_dtype: + raise TypeError("notnull() is not supported for Filters") + + from .filters import NotNullFilter + + return NotNullFilter(self) + + def fillna(self, fill_value): + """ + Create a new term that fills missing values of this term's output with + ``fill_value``. + + Parameters + ---------- + fill_value : ziplime.pipeline.ComputableTerm, or object. + Object to use as replacement for missing values. + + If a ComputableTerm (e.g. a Factor) is passed, that term's results + will be used as fill values. + + If a scalar (e.g. a number) is passed, the scalar will be used as a + fill value. 
+ + Examples + -------- + + **Filling with a Scalar:** + + Let ``f`` be a Factor which would produce the following output:: + + AAPL MSFT MCD BK + 2017-03-13 1.0 NaN 3.0 4.0 + 2017-03-14 1.5 2.5 NaN NaN + + Then ``f.fillna(0)`` produces the following output:: + + AAPL MSFT MCD BK + 2017-03-13 1.0 0.0 3.0 4.0 + 2017-03-14 1.5 2.5 0.0 0.0 + + **Filling with a Term:** + + Let ``f`` be as above, and let ``g`` be another Factor which would + produce the following output:: + + AAPL MSFT MCD BK + 2017-03-13 10.0 20.0 30.0 40.0 + 2017-03-14 15.0 25.0 35.0 45.0 + + Then, ``f.fillna(g)`` produces the following output:: + + AAPL MSFT MCD BK + 2017-03-13 1.0 20.0 3.0 4.0 + 2017-03-14 1.5 2.5 35.0 45.0 + + Returns + ------- + filled : ziplime.pipeline.ComputableTerm + A term computing the same results as ``self``, but with missing + values filled in using values from ``fill_value``. + """ + if self.dtype == bool_dtype: + raise TypeError("fillna() is not supported for Filters") + + if isinstance(fill_value, LoadableTerm): + raise TypeError( + "Can't use expression {} as a fill value. Did you mean to " + "append '.latest?'".format(fill_value) + ) + elif isinstance(fill_value, ComputableTerm): + if_false = fill_value + else: + # Assume we got a scalar value. Make sure it's compatible with our + # dtype. 
+ try: + fill_value = _coerce_to_dtype(fill_value, self.dtype) + except TypeError as exc: + raise TypeError( + "Fill value {value!r} is not a valid choice " + "for term {termname} with dtype {dtype}.\n\n" + "Coercion attempt failed with: {error}".format( + termname=type(self).__name__, + value=fill_value, + dtype=self.dtype, + error=exc, + ) + ) from exc + + if_false = self._constant_type( + const=fill_value, + dtype=self.dtype, + missing_value=self.missing_value, + ) + + return self.notnull().if_else(if_true=self, if_false=if_false) + + # @classlazyval + @property + def _constant_type(cls): + from ..mixins import ConstantMixin + + return cls._with_mixin(ConstantMixin) + + # @classlazyval + @property + def _if_else_type(cls): + from ..mixins import IfElseMixin + + return cls._with_mixin(IfElseMixin) + + def __repr__(self): + return ("{type}([{inputs}], {window_length})").format( + type=type(self).__name__, + inputs=", ".join(i.recursive_repr() for i in self.inputs), + window_length=self.window_length, + ) + + def recursive_repr(self): + return type(self).__name__ + "(...)" + + @classmethod + def _with_mixin(cls, mixin_type): + return mixin_type.universal_mixin_specialization( + cls._principal_computable_term_type(), + ) diff --git a/ziplime/pipeline/terms/factors/__init__.py b/ziplime/pipeline/terms/factors/__init__.py new file mode 100644 index 0000000000..c47d3beae5 --- /dev/null +++ b/ziplime/pipeline/terms/factors/__init__.py @@ -0,0 +1,73 @@ +from .basic.annualized_volatility import AnnualizedVolatility +from .basic.average_dollar_volume import AverageDollarVolume +from .basic.daily_returns import DailyReturns +from .basic.exponential_weighted_moving_average import ExponentialWeightedMovingAverage +from .basic.exponential_weighted_moving_std_dev import ExponentialWeightedMovingStdDev +from .basic.linear_weighted_moving_average import LinearWeightedMovingAverage +from .basic.max_drawdown import MaxDrawdown +from .basic.peer_count import PeerCount +from 
.basic.percent_change import PercentChange
+from .basic.returns import Returns
+from .basic.simple_moving_average import SimpleMovingAverage
+from .basic.vwap import VWAP
+from .basic.weighted_average_value import WeightedAverageValue
+from .custom_factor import CustomFactor
+from .events.business_day_since_previous_event import BusinessDaysSincePreviousEvent
+from .events.business_days_until_next_event import BusinessDaysUntilNextEvent
+from .factor import Factor
+from .grouped_row_transform import GroupedRowTransform
+from .latest import Latest
+from .recarray_field import RecarrayField
+from .statistical.rolling_linear_regression_of_returns import RollingLinearRegressionOfReturns
+from .statistical.rolling_pearson import RollingPearson
+from .statistical.rolling_pearson_of_returns import RollingPearsonOfReturns
+from .statistical.rolling_spearman import RollingSpearman
+from .statistical.rolling_spearman_of_returns import RollingSpearmanOfReturns
+from .statistical.simple_beta import SimpleBeta
+from .technical.aroon import Aroon
+from .technical.bollinger_bands import BollingerBands
+from .technical.fast_stochastic_oscillator import FastStochasticOscillator
+from .technical.ichimoku_kinko_hyo import IchimokuKinkoHyo
+from .technical.moving_average_convergence_divergence_signal import MovingAverageConvergenceDivergenceSignal; MACDSignal = MovingAverageConvergenceDivergenceSignal  # noqa: E702 -- back-compat alias; "MACDSignal" is exported via __all__ below but was previously undefined
+from .technical.rate_of_change_percentage import RateOfChangePercentage
+from .technical.rsi import RSI
+from .technical.true_range import TrueRange
+
+
+__all__ = [
+    "AnnualizedVolatility",
+    "Aroon",
+    "AverageDollarVolume",
+    "BollingerBands",
+    "BusinessDaysSincePreviousEvent",
+    "BusinessDaysUntilNextEvent",
+    "CustomFactor",
+    "DailyReturns",
+    "ExponentialWeightedMovingAverage",
+    "ExponentialWeightedMovingStdDev",
+    "Factor",
+    "FastStochasticOscillator",
+    "IchimokuKinkoHyo",
+    "Latest",
+    "LinearWeightedMovingAverage",
+    "MACDSignal",
+    "MaxDrawdown",
+    "MovingAverageConvergenceDivergenceSignal",
+    "PeerCount",
+    "PercentChange",
+ "RSI", + "RateOfChangePercentage", + "RecarrayField", + "Returns", + "RollingLinearRegressionOfReturns", + "RollingPearson", + "RollingPearsonOfReturns", + "RollingSpearman", + "RollingSpearmanOfReturns", + "SimpleBeta", + "SimpleMovingAverage", + "TrueRange", + "VWAP", + "WeightedAverageValue", +] + diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/__init__.py b/ziplime/pipeline/terms/factors/basic/__init__.py similarity index 100% rename from ziplime/data/data_sources/grpc/grpc_stubs/grpc/__init__.py rename to ziplime/pipeline/terms/factors/basic/__init__.py diff --git a/ziplime/pipeline/terms/factors/basic/annualized_volatility.py b/ziplime/pipeline/terms/factors/basic/annualized_volatility.py new file mode 100644 index 0000000000..4e4519120e --- /dev/null +++ b/ziplime/pipeline/terms/factors/basic/annualized_volatility.py @@ -0,0 +1,26 @@ +from ziplime.pipeline.terms.factors.basic.returns import Returns +from ziplime.pipeline.terms.factors.custom_factor import CustomFactor +from ziplime.utils.math_utils import nanstd + + +class AnnualizedVolatility(CustomFactor): + """ + Volatility. The degree of variation of a series over time as measured by + the standard deviation of daily returns. + https://en.wikipedia.org/wiki/Volatility_(finance) + + **Default Inputs:** [Returns(window_length=2)] + + Parameters + ---------- + annualization_factor : float, optional + The number of time units per year. Defaults is 252, the number of NYSE + trading days in a normal year. 
+ """ + + inputs = [Returns(window_length=2)] + params = {"annualization_factor": 252.0} + window_length = 252 + + def compute(self, today, assets, out, returns, annualization_factor): + out[:] = nanstd(returns, axis=0) * (annualization_factor**0.5) \ No newline at end of file diff --git a/ziplime/pipeline/terms/factors/basic/average_dollar_volume.py b/ziplime/pipeline/terms/factors/basic/average_dollar_volume.py new file mode 100644 index 0000000000..58516c71c2 --- /dev/null +++ b/ziplime/pipeline/terms/factors/basic/average_dollar_volume.py @@ -0,0 +1,18 @@ +from ziplime.pipeline.data import EquityPricing +from ziplime.pipeline.terms.factors.custom_factor import CustomFactor +from ziplime.utils.math_utils import nansum + + +class AverageDollarVolume(CustomFactor): + """ + Average Daily Dollar Volume + + **Default Inputs:** [EquityPricing.close, EquityPricing.volume] + + **Default Window Length:** None + """ + + inputs = [EquityPricing.close, EquityPricing.volume] + + def compute(self, today, assets, out, close, volume): + out[:] = nansum(close * volume, axis=0) / len(close) diff --git a/ziplime/pipeline/terms/factors/basic/clip.py b/ziplime/pipeline/terms/factors/basic/clip.py new file mode 100644 index 0000000000..d005c616f0 --- /dev/null +++ b/ziplime/pipeline/terms/factors/basic/clip.py @@ -0,0 +1,41 @@ +from numpy import clip + +from ziplime.pipeline.terms.factors.custom_factor import CustomFactor + + +class Clip(CustomFactor): + """ + Clip (limit) the values in a factor. + + Given an interval, values outside the interval are clipped to the interval + edges. For example, if an interval of ``[0, 1]`` is specified, values + smaller than 0 become 0, and values larger than 1 become 1. + + **Default Window Length:** 1 + + Parameters + ---------- + min_bound : float + The minimum value to use. + max_bound : float + The maximum value to use. + + Notes + ----- + To only clip values on one side, ``-np.inf` and ``np.inf`` may be passed. 
+ For example, to only clip the maximum value but not clip a minimum value: + + .. code-block:: python + + Clip(inputs=[factor], min_bound=-np.inf, max_bound=user_provided_max) + + See Also + -------- + numpy.clip + """ + + window_length = 1 + params = ("min_bound", "max_bound") + + def compute(self, today, assets, out, values, min_bound, max_bound): + clip(values[-1], min_bound, max_bound, out=out) diff --git a/ziplime/pipeline/terms/factors/basic/daily_returns.py b/ziplime/pipeline/terms/factors/basic/daily_returns.py new file mode 100644 index 0000000000..1c6fe0e9d3 --- /dev/null +++ b/ziplime/pipeline/terms/factors/basic/daily_returns.py @@ -0,0 +1,14 @@ +from ziplime.pipeline.data import EquityPricing +from ziplime.pipeline.terms.factors.basic.returns import Returns + + +class DailyReturns(Returns): + """ + Calculates daily percent change in close price. + + **Default Inputs**: [EquityPricing.close] + """ + + inputs = [EquityPricing.close] + window_safe = True + window_length = 2 diff --git a/ziplime/pipeline/terms/factors/basic/exponential_weighted_factor.py b/ziplime/pipeline/terms/factors/basic/exponential_weighted_factor.py new file mode 100644 index 0000000000..b434920302 --- /dev/null +++ b/ziplime/pipeline/terms/factors/basic/exponential_weighted_factor.py @@ -0,0 +1,164 @@ + + + +from numbers import Number +from numpy import ( + exp, + log, +) + +from ziplime.pipeline.mixins import SingleInputMixin +from ziplime.pipeline.terms.factors.custom_factor import CustomFactor + + +class ExponentialWeightedFactor(SingleInputMixin, CustomFactor): + """ + Base class for factors implementing exponential-weighted operations. + + **Default Inputs:** None + + **Default Window Length:** None + + Parameters + ---------- + inputs : length-1 list or tuple of BoundColumn + The expression over which to compute the average. + window_length : int > 0 + Length of the lookback window over which to compute the average. 
+ decay_rate : float, 0 < decay_rate <= 1 + Weighting factor by which to discount past observations. + + When calculating historical averages, rows are multiplied by the + sequence:: + + decay_rate, decay_rate ** 2, decay_rate ** 3, ... + + Methods + ------- + weights + from_span + from_halflife + from_center_of_mass + """ + + params = ("decay_rate",) + + @classmethod + def from_span(cls, inputs, window_length, span: Number, **kwargs): + """ + Convenience constructor for passing `decay_rate` in terms of `span`. + + Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the + behavior equivalent to passing `span` to pandas.ewma. + + Examples + -------- + .. code-block:: python + + # Equivalent to: + # my_ewma = EWMA( + # inputs=[EquityPricing.close], + # window_length=30, + # decay_rate=(1 - (2.0 / (1 + 15.0))), + # ) + my_ewma = EWMA.from_span( + inputs=[EquityPricing.close], + window_length=30, + span=15, + ) + + Notes + ----- + This classmethod is provided by both + :class:`ExponentialWeightedMovingAverage` and + :class:`ExponentialWeightedMovingStdDev`. + """ + if span <= 1: + raise ValueError("`span` must be a positive number. %s was passed." % span) + + decay_rate = 1.0 - (2.0 / (1.0 + span)) + assert 0.0 < decay_rate <= 1.0 + + return cls( + inputs=inputs, window_length=window_length, decay_rate=decay_rate, **kwargs + ) + + @classmethod + def from_halflife(cls, inputs, window_length, halflife: Number, **kwargs): + """ + Convenience constructor for passing ``decay_rate`` in terms of half + life. + + Forwards ``decay_rate`` as ``exp(log(.5) / halflife)``. This provides + the behavior equivalent to passing `halflife` to pandas.ewma. + + Examples + -------- + .. 
code-block:: python
+
+            # Equivalent to:
+            # my_ewma = EWMA(
+            #    inputs=[EquityPricing.close],
+            #    window_length=30,
+            #    decay_rate=np.exp(np.log(0.5) / 15),
+            # )
+            my_ewma = EWMA.from_halflife(
+                inputs=[EquityPricing.close],
+                window_length=30,
+                halflife=15,
+            )
+
+        Notes
+        -----
+        This classmethod is provided by both
+        :class:`ExponentialWeightedMovingAverage` and
+        :class:`ExponentialWeightedMovingStdDev`.
+        """
+        if halflife <= 0:
+            # Message previously said "`span`", which is a different
+            # parameter (see from_span); report the argument actually checked.
+            raise ValueError(
+                "`halflife` must be a positive number. %s was passed." % halflife
+            )
+        decay_rate = exp(log(0.5) / halflife)
+        assert 0.0 < decay_rate <= 1.0
+
+        return cls(
+            inputs=inputs, window_length=window_length, decay_rate=decay_rate, **kwargs
+        )
+
+    @classmethod
+    def from_center_of_mass(cls, inputs, window_length, center_of_mass, **kwargs):
+        """
+        Convenience constructor for passing `decay_rate` in terms of center of
+        mass.
+
+        Forwards `decay_rate` as `1 - (1 / 1 + center_of_mass)`. This provides
+        behavior equivalent to passing `center_of_mass` to pandas.ewma.
+
+        Examples
+        --------
+        .. code-block:: python
+
+            # Equivalent to:
+            # my_ewma = EWMA(
+            #    inputs=[EquityPricing.close],
+            #    window_length=30,
+            #    decay_rate=(1 - (1 / 15.0)),
+            # )
+            my_ewma = EWMA.from_center_of_mass(
+                inputs=[EquityPricing.close],
+                window_length=30,
+                center_of_mass=15,
+            )
+
+        Notes
+        -----
+        This classmethod is provided by both
+        :class:`ExponentialWeightedMovingAverage` and
+        :class:`ExponentialWeightedMovingStdDev`.
+ """ + return cls( + inputs=inputs, + window_length=window_length, + decay_rate=(1.0 - (1.0 / (1.0 + center_of_mass))), + **kwargs, + ) \ No newline at end of file diff --git a/ziplime/pipeline/terms/factors/basic/exponential_weighted_moving_average.py b/ziplime/pipeline/terms/factors/basic/exponential_weighted_moving_average.py new file mode 100644 index 0000000000..04858a7f4b --- /dev/null +++ b/ziplime/pipeline/terms/factors/basic/exponential_weighted_moving_average.py @@ -0,0 +1,42 @@ +from numpy import average + +from ziplime.pipeline.terms.factors.basic.exponential_weighted_factor import ExponentialWeightedFactor + + +class ExponentialWeightedMovingAverage(ExponentialWeightedFactor): + """ + Exponentially Weighted Moving Average + + **Default Inputs:** None + + **Default Window Length:** None + + Parameters + ---------- + inputs : length-1 list/tuple of BoundColumn + The expression over which to compute the average. + window_length : int > 0 + Length of the lookback window over which to compute the average. + decay_rate : float, 0 < decay_rate <= 1 + Weighting factor by which to discount past observations. + + When calculating historical averages, rows are multiplied by the + sequence:: + + decay_rate, decay_rate ** 2, decay_rate ** 3, ... + + Notes + ----- + - This class can also be imported under the name ``EWMA``. 
+
+    See Also
+    --------
+    :meth:`pandas.DataFrame.ewm`
+    """
+
+    def compute(self, today, assets, out, data, decay_rate):
+        # `exponential_weights` was called here but is neither imported nor
+        # defined in this module (NameError at runtime). Build the decaying
+        # weights inline instead: newest row gets weight 1.0, each older row
+        # one more factor of decay_rate. `average` normalizes the weights.
+        weights = [decay_rate ** k for k in range(len(data) - 1, -1, -1)]
+        out[:] = average(data, axis=0, weights=weights)
diff --git a/ziplime/pipeline/terms/factors/basic/exponential_weighted_moving_std_dev.py b/ziplime/pipeline/terms/factors/basic/exponential_weighted_moving_std_dev.py
new file mode 100644
index 0000000000..7deba25808
--- /dev/null
+++ b/ziplime/pipeline/terms/factors/basic/exponential_weighted_moving_std_dev.py
@@ -0,0 +1,49 @@
+from numpy import (
+    average,
+    sqrt,
+    sum as np_sum,
+)
+
+from ziplime.pipeline.terms.factors.basic.exponential_weighted_factor import ExponentialWeightedFactor
+
+
+class ExponentialWeightedMovingStdDev(ExponentialWeightedFactor):
+    """
+    Exponentially Weighted Moving Standard Deviation
+
+    **Default Inputs:** None
+
+    **Default Window Length:** None
+
+    Parameters
+    ----------
+    inputs : length-1 list/tuple of BoundColumn
+        The expression over which to compute the average.
+    window_length : int > 0
+        Length of the lookback window over which to compute the average.
+    decay_rate : float, 0 < decay_rate <= 1
+        Weighting factor by which to discount past observations.
+
+        When calculating historical averages, rows are multiplied by the
+        sequence::
+
+            decay_rate, decay_rate ** 2, decay_rate ** 3, ...
+
+    Notes
+    -----
+    - This class can also be imported under the name ``EWMSTD``.
+
+    See Also
+    --------
+    :func:`pandas.DataFrame.ewm`
+    """
+
+    def compute(self, today, assets, out, data, decay_rate):
+        # `exponential_weights` is not imported/defined in this module; build
+        # the decaying weights inline (newest row gets weight 1.0).
+        weights = [decay_rate ** k for k in range(len(data) - 1, -1, -1)]
+        mean = average(data, axis=0, weights=weights)
+        variance = average((data - mean) ** 2, axis=0, weights=weights)
+        squared_weight_sum = np_sum(weights) ** 2
+        bias_correction = squared_weight_sum / (squared_weight_sum - np_sum([w * w for w in weights]))
+        out[:] = sqrt(variance * bias_correction)
diff --git a/ziplime/pipeline/terms/factors/basic/linear_weighted_moving_average.py b/ziplime/pipeline/terms/factors/basic/linear_weighted_moving_average.py
new file mode 100644
index 0000000000..bdeb5f62c4
--- /dev/null
+++ b/ziplime/pipeline/terms/factors/basic/linear_weighted_moving_average.py
@@ -0,0 +1,36 @@
+from numpy import arange
+
+from ziplime.pipeline.mixins import SingleInputMixin
+from ziplime.pipeline.terms.factors.custom_factor import CustomFactor
+from ziplime.utils.math_utils import nansum
+from ziplime.utils.numpy_utils import float64_dtype
+
+
+class LinearWeightedMovingAverage(SingleInputMixin, CustomFactor):
+    """
+    Weighted Average Value of an arbitrary column
+
+    **Default Inputs**: None
+
+    **Default Window Length**: None
+    """
+
+    # numpy's nan functions throw warnings when passed an array containing only
+    # nans, but they still returns the desired value (nan), so we ignore the
+    # warning.
+ # ctx = ignore_nanwarnings() + + def compute(self, today, assets, out, data): + ndays = data.shape[0] + + # Initialize weights array + weights = arange(1, ndays + 1, dtype=float64_dtype).reshape(ndays, 1) + + # Compute normalizer + normalizer = (ndays * (ndays + 1)) / 2 + + # Weight the data + weighted_data = data * weights + + # Compute weighted averages + out[:] = nansum(weighted_data, axis=0) / normalizer diff --git a/ziplime/pipeline/terms/factors/basic/max_drawdown.py b/ziplime/pipeline/terms/factors/basic/max_drawdown.py new file mode 100644 index 0000000000..1c236f51ef --- /dev/null +++ b/ziplime/pipeline/terms/factors/basic/max_drawdown.py @@ -0,0 +1,28 @@ +import numpy as np +from numpy import fmax, isnan + +from ziplime.pipeline.mixins import SingleInputMixin +from ziplime.pipeline.terms.factors.custom_factor import CustomFactor +from ziplime.utils.math_utils import nanargmax, nanmax + + +class MaxDrawdown(SingleInputMixin, CustomFactor): + """ + Max Drawdown + + **Default Inputs:** None + + **Default Window Length:** None + """ + + # ctx = ignore_nanwarnings() + + def compute(self, today, assets, out, data): + drawdowns = fmax.accumulate(data, axis=0) - data + drawdowns[isnan(drawdowns)] = -np.inf + drawdown_ends = nanargmax(drawdowns, axis=0) + + # TODO: Accelerate this loop in Cython or Numba. + for i, end in enumerate(drawdown_ends): + peak = nanmax(data[: end + 1, i]) + out[i] = (peak - data[end, i]) / data[end, i] diff --git a/ziplime/pipeline/terms/factors/basic/peer_count.py b/ziplime/pipeline/terms/factors/basic/peer_count.py new file mode 100644 index 0000000000..e58e1c9d26 --- /dev/null +++ b/ziplime/pipeline/terms/factors/basic/peer_count.py @@ -0,0 +1,35 @@ +from numpy import unique, copyto + +from ziplime.pipeline.mixins import SingleInputMixin +from ziplime.pipeline.terms.factors.custom_factor import CustomFactor + + +class PeerCount(SingleInputMixin, CustomFactor): + """ + Peer Count of distinct categories in a given classifier. 
This factor + is returned by the classifier instance method peer_count() + + **Default Inputs:** None + + **Default Window Length:** 1 + """ + + window_length = 1 + + def _validate(self): + super(PeerCount, self)._validate() + if self.window_length != 1: + raise ValueError( + "'PeerCount' expected a window length of 1, but was given" + "{window_length}.".format(window_length=self.window_length) + ) + + def compute(self, today, assets, out, classifier_values): + # Convert classifier array to group label int array + group_labels, null_label = self.inputs[0]._to_integral(classifier_values[0]) + _, inverse, counts = unique( # Get counts, idx of unique groups + group_labels, + return_counts=True, + return_inverse=True, + ) + copyto(out, counts[inverse], where=(group_labels != null_label)) \ No newline at end of file diff --git a/ziplime/pipeline/terms/factors/basic/percent_change.py b/ziplime/pipeline/terms/factors/basic/percent_change.py new file mode 100644 index 0000000000..82b6d29ff8 --- /dev/null +++ b/ziplime/pipeline/terms/factors/basic/percent_change.py @@ -0,0 +1,33 @@ +from numpy import errstate as np_errstate +from ziplime.pipeline.mixins import SingleInputMixin +from ziplime.pipeline.terms.factors.custom_factor import CustomFactor + + +class PercentChange(SingleInputMixin, CustomFactor): + """ + Calculates the percent change over the given window_length. + + **Default Inputs:** None + + **Default Window Length:** None + + Notes + ----- + Percent change is calculated as ``(new - old) / abs(old)``. + """ + + window_safe = True + + def _validate(self): + super(PercentChange, self)._validate() + if self.window_length < 2: + raise ValueError( + "'PercentChange' expected a window length" + "of at least 2, but was given {window_length}. 
" + "For daily percent change, use a window " + "length of 2.".format(window_length=self.window_length) + ) + + def compute(self, today, assets, out, values): + with np_errstate(divide="ignore", invalid="ignore"): + out[:] = (values[-1] - values[0]) / abs(values[0]) diff --git a/ziplime/pipeline/terms/factors/basic/returns.py b/ziplime/pipeline/terms/factors/basic/returns.py new file mode 100644 index 0000000000..5bbef8936e --- /dev/null +++ b/ziplime/pipeline/terms/factors/basic/returns.py @@ -0,0 +1,25 @@ +from ziplime.pipeline.terms.factors.custom_factor import CustomFactor +from ziplime.pipeline.data import EquityPricing + + +class Returns(CustomFactor): + """ + Calculates the percent change in close price over the given window_length. + + **Default Inputs**: [EquityPricing.close] + """ + + inputs = [EquityPricing.close] + window_safe = True + + def _validate(self): + super(Returns, self)._validate() + if self.window_length < 2: + raise ValueError( + "'Returns' expected a window length of at least 2, but was " + "given {window_length}. 
For daily returns, use a window " + "length of 2.".format(window_length=self.window_length) + ) + + def compute(self, today, assets, out, close): + out[:] = (close[-1] - close[0]) / close[0] diff --git a/ziplime/pipeline/terms/factors/basic/simple_moving_average.py b/ziplime/pipeline/terms/factors/basic/simple_moving_average.py new file mode 100644 index 0000000000..b6224e48ac --- /dev/null +++ b/ziplime/pipeline/terms/factors/basic/simple_moving_average.py @@ -0,0 +1,22 @@ +from ziplime.utils.math_utils import nanmean + +from ziplime.pipeline.mixins import SingleInputMixin +from ziplime.pipeline.terms.factors.custom_factor import CustomFactor + + +class SimpleMovingAverage(SingleInputMixin, CustomFactor): + """ + Average Value of an arbitrary column + + **Default Inputs**: None + + **Default Window Length**: None + """ + + # numpy's nan functions throw warnings when passed an array containing only + # nans, but they still returns the desired value (nan), so we ignore the + # warning. + # ctx = ignore_nanwarnings() + + def compute(self, today, assets, out, data): + out[:] = nanmean(data, axis=0) diff --git a/ziplime/pipeline/terms/factors/basic/vwap.py b/ziplime/pipeline/terms/factors/basic/vwap.py new file mode 100644 index 0000000000..946f60edbc --- /dev/null +++ b/ziplime/pipeline/terms/factors/basic/vwap.py @@ -0,0 +1,14 @@ +from ziplime.pipeline.data import EquityPricing +from ziplime.pipeline.terms.factors.basic.weighted_average_value import WeightedAverageValue + + +class VWAP(WeightedAverageValue): + """ + Volume Weighted Average Price + + **Default Inputs:** [EquityPricing.close, EquityPricing.volume] + + **Default Window Length:** None + """ + + inputs = (EquityPricing.close, EquityPricing.volume) diff --git a/ziplime/pipeline/terms/factors/basic/weighted_average_value.py b/ziplime/pipeline/terms/factors/basic/weighted_average_value.py new file mode 100644 index 0000000000..b74c7b1b55 --- /dev/null +++ 
b/ziplime/pipeline/terms/factors/basic/weighted_average_value.py @@ -0,0 +1,15 @@ +from ziplime.pipeline.terms.factors.custom_factor import CustomFactor +from ziplime.utils.math_utils import nansum + + +class WeightedAverageValue(CustomFactor): + """ + Helper for VWAP-like computations. + + **Default Inputs:** None + + **Default Window Length:** None + """ + + def compute(self, today, assets, out, base, weight): + out[:] = nansum(base * weight, axis=0) / nansum(weight, axis=0) diff --git a/ziplime/pipeline/terms/factors/custom_factor.py b/ziplime/pipeline/terms/factors/custom_factor.py new file mode 100644 index 0000000000..ced3ce7bb9 --- /dev/null +++ b/ziplime/pipeline/terms/factors/custom_factor.py @@ -0,0 +1,201 @@ +from ziplime.pipeline.terms.factors.factor import Factor +from ziplime.pipeline.terms.factors.recarray_field import RecarrayField +from ziplime.errors import UnsupportedDataType +from ziplime.pipeline.dtypes import CLASSIFIER_DTYPES, FILTER_DTYPES + +from ziplime.pipeline.mixins import CustomTermMixin, PositiveWindowLengthMixin + +from ziplime.utils.numpy_utils import float64_dtype + + +class CustomFactor(PositiveWindowLengthMixin, CustomTermMixin, Factor): + ''' + Base class for user-defined Factors. + + Parameters + ---------- + inputs : iterable, optional + An iterable of `BoundColumn` instances (e.g. USEquityPricing.close), + describing the data to load and pass to `self.compute`. If this + argument is not passed to the CustomFactor constructor, we look for a + class-level attribute named `inputs`. + outputs : iterable[str], optional + An iterable of strings which represent the names of each output this + factor should compute and return. If this argument is not passed to the + CustomFactor constructor, we look for a class-level attribute named + `outputs`. + window_length : int, optional + Number of rows to pass for each input. 
If this argument is not passed + to the CustomFactor constructor, we look for a class-level attribute + named `window_length`. + mask : ziplime.pipeline.Filter, optional + A Filter describing the assets on which we should compute each day. + Each call to ``CustomFactor.compute`` will only receive assets for + which ``mask`` produced True on the day for which compute is being + called. + + Notes + ----- + Users implementing their own Factors should subclass CustomFactor and + implement a method named `compute` with the following signature: + + .. code-block:: python + + def compute(self, today, assets, out, *inputs): + ... + + On each simulation date, ``compute`` will be called with the current date, + an array of sids, an output array, and an input array for each expression + passed as inputs to the CustomFactor constructor. + + The specific types of the values passed to `compute` are as follows:: + + today : np.datetime64[ns] + Row label for the last row of all arrays passed as `inputs`. + assets : np.array[int64, ndim=1] + Column labels for `out` and`inputs`. + out : np.array[self.dtype, ndim=1] + Output array of the same shape as `assets`. `compute` should write + its desired return values into `out`. If multiple outputs are + specified, `compute` should write its desired return values into + `out.` for each output name in `self.outputs`. + *inputs : tuple of np.array + Raw data arrays corresponding to the values of `self.inputs`. + + ``compute`` functions should expect to be passed NaN values for dates on + which no data was available for an asset. This may include dates on which + an asset did not yet exist. + + For example, if a CustomFactor requires 10 rows of close price data, and + asset A started trading on Monday June 2nd, 2014, then on Tuesday, June + 3rd, 2014, the column of input data for asset A will have 9 leading NaNs + for the preceding days on which data was not yet available. 
+ + Examples + -------- + + A CustomFactor with pre-declared defaults: + + .. code-block:: python + + class TenDayRange(CustomFactor): + """ + Computes the difference between the highest high in the last 10 + days and the lowest low. + + Pre-declares high and low as default inputs and `window_length` as + 10. + """ + + inputs = [USEquityPricing.high, USEquityPricing.low] + window_length = 10 + + def compute(self, today, assets, out, highs, lows): + from numpy import nanmin, nanmax + + highest_highs = nanmax(highs, axis=0) + lowest_lows = nanmin(lows, axis=0) + out[:] = highest_highs - lowest_lows + + + # Doesn't require passing inputs or window_length because they're + # pre-declared as defaults for the TenDayRange class. + ten_day_range = TenDayRange() + + A CustomFactor without defaults: + + .. code-block:: python + + class MedianValue(CustomFactor): + """ + Computes the median value of an arbitrary single input over an + arbitrary window.. + + Does not declare any defaults, so values for `window_length` and + `inputs` must be passed explicitly on every construction. + """ + + def compute(self, today, assets, out, data): + from numpy import nanmedian + out[:] = data.nanmedian(data, axis=0) + + # Values for `inputs` and `window_length` must be passed explicitly to + # MedianValue. + median_close10 = MedianValue([USEquityPricing.close], window_length=10) + median_low15 = MedianValue([USEquityPricing.low], window_length=15) + + A CustomFactor with multiple outputs: + + .. code-block:: python + + class MultipleOutputs(CustomFactor): + inputs = [USEquityPricing.close] + outputs = ['alpha', 'beta'] + window_length = N + + def compute(self, today, assets, out, close): + computed_alpha, computed_beta = some_function(close) + out.alpha[:] = computed_alpha + out.beta[:] = computed_beta + + # Each output is returned as its own Factor upon instantiation. 
+ alpha, beta = MultipleOutputs() + + # Equivalently, we can create a single factor instance and access each + # output as an attribute of that instance. + multiple_outputs = MultipleOutputs() + alpha = multiple_outputs.alpha + beta = multiple_outputs.beta + + Note: If a CustomFactor has multiple outputs, all outputs must have the + same dtype. For instance, in the example above, if alpha is a float then + beta must also be a float. + ''' + + dtype = float64_dtype + + def _validate(self): + try: + super(CustomFactor, self)._validate() + except UnsupportedDataType as exc: + if self.dtype in CLASSIFIER_DTYPES: + raise UnsupportedDataType( + typename=type(self).__name__, + dtype=self.dtype, + hint="Did you mean to create a CustomClassifier?", + ) from exc + elif self.dtype in FILTER_DTYPES: + raise UnsupportedDataType( + typename=type(self).__name__, + dtype=self.dtype, + hint="Did you mean to create a CustomFilter?", + ) from exc + raise + + def __getattribute__(self, name): + outputs = object.__getattribute__(self, "outputs") + if outputs is None: + return super(CustomFactor, self).__getattribute__(name) + elif name in outputs: + return RecarrayField(factor=self, attribute=name) + else: + try: + return super(CustomFactor, self).__getattribute__(name) + except AttributeError as exc: + raise AttributeError( + "Instance of {factor} has no output named {attr!r}. 
" + "Possible choices are: {choices}.".format( + factor=type(self).__name__, + attr=name, + choices=self.outputs, + ) + ) from exc + + def __iter__(self): + if self.outputs is None: + raise ValueError( + "{factor} does not have multiple outputs.".format( + factor=type(self).__name__, + ) + ) + return (RecarrayField(self, attr) for attr in self.outputs) diff --git a/ziplime/pipeline/terms/factors/daily_summary.py b/ziplime/pipeline/terms/factors/daily_summary.py new file mode 100644 index 0000000000..107377842f --- /dev/null +++ b/ziplime/pipeline/terms/factors/daily_summary.py @@ -0,0 +1,58 @@ +from numpy import isnan, nan + +from ziplime.pipeline.terms.factors.factor import Factor + +from ziplime.pipeline.mixins import ( + SingleInputMixin, +) + +from ziplime.utils.numpy_utils import ( + as_column, + float64_dtype, +) + +_RANK_METHODS = frozenset(["average", "min", "max", "dense", "ordinal"]) + + +class DailySummary(SingleInputMixin, Factor): + """1D Factor that computes a summary statistic across all assets.""" + + ndim = 1 + window_length = 0 + params = ("func",) + + def __new__(cls, func, input_, mask, dtype): + # TODO: We should be able to support datetime64 as well, but that + # requires extra care for handling NaT. 
+ if dtype != float64_dtype: + raise AssertionError( + "DailySummary only supports float64 dtype, got {}".format(dtype), + ) + + return super(DailySummary, cls).__new__( + cls, + inputs=[input_], + dtype=dtype, + missing_value=nan, + window_safe=input_.window_safe, + func=func, + mask=mask, + ) + + def _compute(self, arrays, dates, assets, mask): + func = self.params["func"] + + data = arrays[0] + data[~mask] = nan + if not isnan(self.inputs[0].missing_value): + data[data == self.inputs[0].missing_value] = nan + + return as_column(func(data, self.inputs[0].missing_value)) + + def __repr__(self): + return "{}.{}()".format( + self.inputs[0].recursive_repr(), + self.params["func"].__name__, + ) + + graph_repr = recursive_repr = __repr__ diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/__init__.py b/ziplime/pipeline/terms/factors/events/__init__.py similarity index 100% rename from ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/__init__.py rename to ziplime/pipeline/terms/factors/events/__init__.py diff --git a/ziplime/pipeline/factors/events.py b/ziplime/pipeline/terms/factors/events/business_day_since_previous_event.py similarity index 57% rename from ziplime/pipeline/factors/events.py rename to ziplime/pipeline/terms/factors/events/business_day_since_previous_event.py index d1a3cbc310..fb1257c4d6 100644 --- a/ziplime/pipeline/factors/events.py +++ b/ziplime/pipeline/terms/factors/events/business_day_since_previous_event.py @@ -1,9 +1,6 @@ -""" -Factors describing information about event data (e.g. earnings -announcements, acquisitions, dividends, etc.). 
-""" - from numpy import newaxis + +from ziplime.pipeline.terms.factors.factor import Factor from ziplime.utils.numpy_utils import ( NaTD, busday_count_mask_NaT, @@ -11,8 +8,6 @@ float64_dtype, ) -from .factor import Factor - class BusinessDaysSincePreviousEvent(Factor): """ @@ -64,39 +59,3 @@ def _compute(self, arrays, dates, assets, mask): # Convert row labels into a column vector for broadcasted comparison. reference_dates = dates.values.astype(datetime64D_dtype)[:, newaxis] return busday_count_mask_NaT(announce_dates, reference_dates) - - -class BusinessDaysUntilNextEvent(Factor): - """ - Abstract class for business days since a next event. - Returns the number of **business days** (not trading days!) until - the next known event date for each asset. - - This doesn't use trading days because the trading calendar includes - information that may not have been available to the algorithm at the time - when `compute` is called. - - For example, the NYSE closings September 11th 2001, would not have been - known to the algorithm on September 10th. - - Assets that announced or will announce the event today will produce a value - of 0.0. Assets that will announce the event on the next upcoming business - day will produce a value of 1.0. - - Assets for which the event date is `NaT` will produce a value of `NaN`. - """ - - window_length = 0 - dtype = float64_dtype - - def _compute(self, arrays, dates, assets, mask): - - # Coerce from [ns] to [D] for numpy busday_count. - announce_dates = arrays[0].astype(datetime64D_dtype) - - # Set masked values to NaT. - announce_dates[~mask] = NaTD - - # Convert row labels into a column vector for broadcasted comparison. 
- reference_dates = dates.values.astype(datetime64D_dtype)[:, newaxis] - return busday_count_mask_NaT(reference_dates, announce_dates) diff --git a/ziplime/pipeline/terms/factors/events/business_days_until_next_event.py b/ziplime/pipeline/terms/factors/events/business_days_until_next_event.py new file mode 100644 index 0000000000..743a0a7db4 --- /dev/null +++ b/ziplime/pipeline/terms/factors/events/business_days_until_next_event.py @@ -0,0 +1,52 @@ + +""" +Factors describing information about event data (e.g. earnings +announcements, acquisitions, dividends, etc.). +""" + +from numpy import newaxis + +from ziplime.pipeline.terms.factors.factor import Factor +from ziplime.utils.numpy_utils import ( + NaTD, + busday_count_mask_NaT, + datetime64D_dtype, + float64_dtype, +) + + + +class BusinessDaysUntilNextEvent(Factor): + """ + Abstract class for business days since a next event. + Returns the number of **business days** (not trading days!) until + the next known event date for each asset. + + This doesn't use trading days because the trading calendar includes + information that may not have been available to the algorithm at the time + when `compute` is called. + + For example, the NYSE closings September 11th 2001, would not have been + known to the algorithm on September 10th. + + Assets that announced or will announce the event today will produce a value + of 0.0. Assets that will announce the event on the next upcoming business + day will produce a value of 1.0. + + Assets for which the event date is `NaT` will produce a value of `NaN`. + """ + + window_length = 0 + dtype = float64_dtype + + def _compute(self, arrays, dates, assets, mask): + + # Coerce from [ns] to [D] for numpy busday_count. + announce_dates = arrays[0].astype(datetime64D_dtype) + + # Set masked values to NaT. + announce_dates[~mask] = NaTD + + # Convert row labels into a column vector for broadcasted comparison. 
+ reference_dates = dates.values.astype(datetime64D_dtype)[:, newaxis] + return busday_count_mask_NaT(reference_dates, announce_dates) diff --git a/ziplime/pipeline/factors/factor.py b/ziplime/pipeline/terms/factors/factor.py similarity index 50% rename from ziplime/pipeline/factors/factor.py rename to ziplime/pipeline/terms/factors/factor.py index 9b5261d6b0..4e086a5068 100644 --- a/ziplime/pipeline/factors/factor.py +++ b/ziplime/pipeline/terms/factors/factor.py @@ -1,385 +1,38 @@ -""" -factor.py -""" - -import numpy as np -from operator import attrgetter -from numbers import Number -from math import ceil from textwrap import dedent -from numpy import empty_like, inf, isnan, nan, where +from numpy import inf, nan from scipy.stats import rankdata -from ziplime.utils.compat import wraps -from ziplime.errors import ( - BadPercentileBounds, - UnknownRankMethod, - UnsupportedDataType, -) -from ziplime.lib.normalize import naive_grouped_rowwise_apply -from ziplime.lib.rank import masked_rankdata_2d, rankdata_1d_descending -from ziplime.pipeline.classifiers import Classifier, Everything, Quantiles -from ziplime.pipeline.dtypes import ( - CLASSIFIER_DTYPES, - FACTOR_DTYPES, - FILTER_DTYPES, -) +from ziplime.pipeline.terms.computable_term import ComputableTerm +from ziplime.pipeline.terms.factors.utils.grouped_row_transform_utils import demean, zscore, winsorize +from ziplime.pipeline.terms.factors.utils.operators import binary_operator, reflected_binary_operator, unary_operator, \ + function_application +from ziplime.pipeline.terms.factors.utils.summary_funcs import summary_method, summary_funcs +from ziplime.pipeline.terms.term import Term +from ziplime.errors import BadPercentileBounds +from ziplime.lib.rank import rankdata_1d_descending +from ziplime.pipeline.terms.classifiers import Classifier, Quantiles +from ziplime.pipeline.dtypes import FACTOR_DTYPES + from ziplime.pipeline.expression import ( - BadBinaryOperator, COMPARISONS, - is_comparison, MATH_BINOPS, 
method_name_for_op, - NumericalExpression, NUMEXPR_MATH_FUNCS, UNARY_OPS, unary_op_name, ) -from ziplime.pipeline.filters import ( +from ziplime.pipeline.terms.filters import ( Filter, - NumExprFilter, PercentileFilter, MaximumFilter, ) -from ziplime.pipeline.mixins import ( - CustomTermMixin, - LatestMixin, - PositiveWindowLengthMixin, - RestrictedDTypeMixin, - SingleInputMixin, -) -from ziplime.pipeline.term import AssetExists, ComputableTerm, Term -from ziplime.utils.functional import with_doc, with_name -from ziplime.utils.math_utils import ( - nanmax, - nanmean, - nanmedian, - nanmin, - nanstd, - nansum, -) -from ziplime.utils.numpy_utils import ( - as_column, - bool_dtype, - coerce_to_dtype, - float64_dtype, - is_missing, -) -from ziplime.utils.sharedoc import templated_docstring - -_RANK_METHODS = frozenset(["average", "min", "max", "dense", "ordinal"]) - - -def coerce_numbers_to_my_dtype(f): - """ - A decorator for methods whose signature is f(self, other) that coerces - ``other`` to ``self.dtype``. - - This is used to make comparison operations between numbers and `Factor` - instances work independently of whether the user supplies a float or - integer literal. - - For example, if I write:: - - my_filter = my_factor > 3 - - my_factor probably has dtype float64, but 3 is an int, so we want to coerce - to float64 before doing the comparison. - """ - - @wraps(f) - def method(self, other): - if isinstance(other, Number): - other = coerce_to_dtype(self.dtype, other) - return f(self, other) - - return method - - -def binop_return_dtype(op, left, right): - """ - Compute the expected return dtype for the given binary operator. - - Parameters - ---------- - op : str - Operator symbol, (e.g. '+', '-', ...). - left : numpy.dtype - Dtype of left hand side. - right : numpy.dtype - Dtype of right hand side. - - Returns - ------- - outdtype : numpy.dtype - The dtype of the result of `left right`. 
- """ - if is_comparison(op): - if left != right: - raise TypeError( - "Don't know how to compute {left} {op} {right}.\n" - "Comparisons are only supported between Factors of equal " - "dtypes.".format(left=left, op=op, right=right) - ) - return bool_dtype - - elif left != float64_dtype or right != float64_dtype: - raise TypeError( - "Don't know how to compute {left} {op} {right}.\n" - "Arithmetic operators are only supported between Factors of " - "dtype 'float64'.".format( - left=left.name, - op=op, - right=right.name, - ) - ) - return float64_dtype - - -BINOP_DOCSTRING_TEMPLATE = """ -Construct a :class:`~ziplime.pipeline.{rtype}` computing ``self {op} other``. - -Parameters ----------- -other : ziplime.pipeline.Factor, float - Right-hand side of the expression. - -Returns -------- -{ret} -""" - -BINOP_RETURN_FILTER = """\ -filter : ziplime.pipeline.Filter - Filter computing ``self {op} other`` with the outputs of ``self`` and - ``other``. -""" - -BINOP_RETURN_FACTOR = """\ -factor : ziplime.pipeline.Factor - Factor computing ``self {op} other`` with outputs of ``self`` and - ``other``. -""" - - -def binary_operator(op): - """ - Factory function for making binary operator methods on a Factor subclass. - - Returns a function, "binary_operator" suitable for implementing functions - like __add__. - """ - # When combining a Factor with a NumericalExpression, we use this - # attrgetter instance to defer to the commuted implementation of the - # NumericalExpression operator. 
- commuted_method_getter = attrgetter(method_name_for_op(op, commute=True)) - - is_compare = is_comparison(op) - - if is_compare: - ret_doc = BINOP_RETURN_FILTER.format(op=op) - rtype = "Filter" - else: - ret_doc = BINOP_RETURN_FACTOR.format(op=op) - rtype = "Factor" - - docstring = BINOP_DOCSTRING_TEMPLATE.format( - op=op, - ret=ret_doc, - rtype=rtype, - ) - - @with_doc(docstring) - @with_name(method_name_for_op(op)) - @coerce_numbers_to_my_dtype - def binary_operator(self, other): - # This can't be hoisted up a scope because the types returned by - # binop_return_type aren't defined when the top-level function is - # invoked in the class body of Factor. - return_type = NumExprFilter if is_compare else NumExprFactor - - if isinstance(self, NumExprFactor): - self_expr, other_expr, new_inputs = self.build_binary_op( - op, - other, - ) - return return_type( - "({left}) {op} ({right})".format( - left=self_expr, - op=op, - right=other_expr, - ), - new_inputs, - dtype=binop_return_dtype(op, self.dtype, other.dtype), - ) - elif isinstance(other, NumExprFactor): - # NumericalExpression overrides ops to correctly handle merging of - # inputs. Look up and call the appropriate reflected operator with - # ourself as the input. - return commuted_method_getter(other)(self) - elif isinstance(other, Term): - if self is other: - return return_type( - "x_0 {op} x_0".format(op=op), - (self,), - dtype=binop_return_dtype(op, self.dtype, other.dtype), - ) - return return_type( - "x_0 {op} x_1".format(op=op), - (self, other), - dtype=binop_return_dtype(op, self.dtype, other.dtype), - ) - elif isinstance(other, Number): - return return_type( - "x_0 {op} ({constant})".format(op=op, constant=other), - binds=(self,), - # .dtype access is safe here because coerce_numbers_to_my_dtype - # will convert any input numbers to numpy equivalents. 
- dtype=binop_return_dtype(op, self.dtype, other.dtype), - ) - raise BadBinaryOperator(op, self, other) - - return binary_operator - - -def reflected_binary_operator(op): - """ - Factory function for making binary operator methods on a Factor. - - Returns a function, "reflected_binary_operator" suitable for implementing - functions like __radd__. - """ - assert not is_comparison(op) - - @with_name(method_name_for_op(op, commute=True)) - @coerce_numbers_to_my_dtype - def reflected_binary_operator(self, other): - - if isinstance(self, NumericalExpression): - self_expr, other_expr, new_inputs = self.build_binary_op(op, other) - return NumExprFactor( - "({left}) {op} ({right})".format( - left=other_expr, - right=self_expr, - op=op, - ), - new_inputs, - dtype=binop_return_dtype(op, other.dtype, self.dtype), - ) - - # Only have to handle the numeric case because in all other valid cases - # the corresponding left-binding method will be called. - elif isinstance(other, Number): - return NumExprFactor( - "{constant} {op} x_0".format(op=op, constant=other), - binds=(self,), - dtype=binop_return_dtype(op, other.dtype, self.dtype), - ) - raise BadBinaryOperator(op, other, self) - - return reflected_binary_operator - - -def unary_operator(op): - """ - Factory function for making unary operator methods for Factors. - """ - # Only negate is currently supported. - valid_ops = {"-"} - if op not in valid_ops: - raise ValueError("Invalid unary operator %s." 
% op) - - @with_doc("Unary Operator: '%s'" % op) - @with_name(unary_op_name(op)) - def unary_operator(self): - if self.dtype != float64_dtype: - raise TypeError( - "Can't apply unary operator {op!r} to instance of " - "{typename!r} with dtype {dtypename!r}.\n" - "{op!r} is only supported for Factors of dtype " - "'float64'.".format( - op=op, - typename=type(self).__name__, - dtypename=self.dtype.name, - ) - ) - - # This can't be hoisted up a scope because the types returned by - # unary_op_return_type aren't defined when the top-level function is - # invoked. - if isinstance(self, NumericalExpression): - return NumExprFactor( - "{op}({expr})".format(op=op, expr=self._expr), - self.inputs, - dtype=float64_dtype, - ) - else: - return NumExprFactor( - "{op}x_0".format(op=op), - (self,), - dtype=float64_dtype, - ) - - return unary_operator - - -def function_application(func): - """ - Factory function for producing function application methods for Factor - subclasses. - """ - if func not in NUMEXPR_MATH_FUNCS: - raise ValueError("Unsupported mathematical function '%s'" % func) - - docstring = dedent( - """\ - Construct a Factor that computes ``{}()`` on each output of ``self``. - - Returns - ------- - factor : ziplime.pipeline.Factor - """.format( - func - ) - ) - - @with_doc(docstring) - @with_name(func) - def mathfunc(self): - if isinstance(self, NumericalExpression): - return NumExprFactor( - "{func}({expr})".format(func=func, expr=self._expr), - self.inputs, - dtype=float64_dtype, - ) - else: - return NumExprFactor( - "{func}(x_0)".format(func=func), - (self,), - dtype=float64_dtype, - ) - - return mathfunc +from ziplime.pipeline.mixins import RestrictedDTypeMixin +from ziplime.utils.numpy_utils import float64_dtype +from ziplime.utils.sharedoc import templated_docstring -# Decorators for Factor methods. 
-# if_not_float64_tell_caller_to_use_isnull = restrict_to_dtype( -# dtype=float64_dtype, -# message_template=( -# "{method_name}() was called on a factor of dtype {received_dtype}.\n" -# "{method_name}() is only defined for dtype {expected_dtype}." -# "To filter missing data, use isnull() or notnull()." -# ), -# ) -# float64_only = restrict_to_dtype( -# dtype=float64_dtype, -# message_template=( -# "{method_name}() is only defined on Factors of dtype {expected_dtype}," -# " but it was called on a Factor of dtype {received_dtype}." -# ), -# ) CORRELATION_METHOD_NOTE = dedent( """\ @@ -394,71 +47,6 @@ def mathfunc(self): ) -class summary_funcs: - """Namespace of functions meant to be used with DailySummary.""" - - @staticmethod - def mean(a, missing_value): - return nanmean(a, axis=1) - - @staticmethod - def stddev(a, missing_value): - return nanstd(a, axis=1) - - @staticmethod - def max(a, missing_value): - return nanmax(a, axis=1) - - @staticmethod - def min(a, missing_value): - return nanmin(a, axis=1) - - @staticmethod - def median(a, missing_value): - return nanmedian(a, axis=1) - - @staticmethod - def sum(a, missing_value): - return nansum(a, axis=1) - - @staticmethod - def notnull_count(a, missing_value): - return (~is_missing(a, missing_value)).sum(axis=1) - - names = {k for k in locals() if not k.startswith("_")} - - -def summary_method(name): - func = getattr(summary_funcs, name) - - #@float64_only - def f(self, mask: Filter | None = None): - """Create a 1-dimensional factor computing the {} of self, each day. - - Parameters - ---------- - mask : ziplime.pipeline.Filter, optional - A Filter representing assets to consider when computing results. - If supplied, we ignore asset/date pairs where ``mask`` produces - ``False``. 
- - Returns - ------- - result : ziplime.pipeline.Factor - """ - return DailySummary( - func, - self, - mask=mask, - dtype=self.dtype, - ) - - f.__name__ = func.__name__ - f.__doc__ = f.__doc__.format(f.__name__) - - return f - - class Factor(RestrictedDTypeMixin, ComputableTerm): """ Pipeline API expression producing a numerical or date-valued output. @@ -641,6 +229,8 @@ def demean(self, mask: Filter | None = None, -------- :meth:`pandas.DataFrame.groupby` """ + from ziplime.pipeline.terms.factors.grouped_row_transform import GroupedRowTransform + return GroupedRowTransform( transform=demean, transform_args=(), @@ -705,6 +295,8 @@ def zscore(self, mask: Filter | None = None, -------- :meth:`pandas.DataFrame.groupby` """ + from ziplime.pipeline.terms.factors.grouped_row_transform import GroupedRowTransform + return GroupedRowTransform( transform=zscore, transform_args=(), @@ -758,6 +350,8 @@ def rank( -------- :func:`scipy.stats.rankdata` """ + from ziplime.pipeline.terms.factors.grouped_row_transform import GroupedRowTransform + from ziplime.pipeline.terms.factors.rank import Rank if groupby is None: return Rank(self, method=method, ascending=ascending, mask=mask) @@ -828,7 +422,7 @@ def pearsonr(self, target: Term, correlation_length: int, mask: Filter | None = :class:`ziplime.pipeline.factors.RollingPearsonOfReturns` :meth:`Factor.spearmanr` """ - from .statistical import RollingPearson + from .statistical.rolling_pearson import RollingPearson return RollingPearson( base_factor=self, @@ -891,7 +485,7 @@ def spearmanr(self, target: Term, correlation_length: int, mask: Filter | None = :func:`scipy.stats.spearmanr` :meth:`Factor.pearsonr` """ - from .statistical import RollingSpearman + from .statistical.rolling_spearman import RollingSpearman return RollingSpearman( base_factor=self, @@ -951,7 +545,7 @@ def linear_regression(self, target: Term, regression_length: int, mask: Filter | -------- :func:`scipy.stats.linregress` """ - from .statistical import 
RollingLinearRegression + from .statistical.rolling_linear_regression import RollingLinearRegression return RollingLinearRegression( dependent=self, @@ -1046,6 +640,7 @@ def winsorize( max_percentile=max_percentile, upper_bound=1.0, ) + from ziplime.pipeline.terms.factors import GroupedRowTransform return GroupedRowTransform( transform=winsorize, transform_args=(min_percentile, max_percentile), @@ -1298,7 +893,7 @@ def clip(self, min_bound, max_bound, mask=None): -------- numpy.clip """ - from .basic import Clip + from .basic.clip import Clip return Clip( inputs=[self], @@ -1311,554 +906,6 @@ def _principal_computable_term_type(cls): return Factor -class NumExprFactor(NumericalExpression, Factor): - """ - Factor computed from a numexpr expression. - - Parameters - ---------- - expr : string - A string suitable for passing to numexpr. All variables in 'expr' - should be of the form "x_i", where i is the index of the corresponding - factor input in 'binds'. - binds : tuple - A tuple of factors to use as inputs. - - Notes - ----- - NumExprFactors are constructed by numerical operators like `+` and `-`. - Users should rarely need to construct a NumExprFactor directly. - """ - - pass - - -class GroupedRowTransform(Factor): - """ - A Factor that transforms an input factor by applying a row-wise - shape-preserving transformation on classifier-defined groups of that - Factor. - - This is most often useful for normalization operators like ``zscore`` or - ``demean`` or for performing ranking using ``rank``. - - Parameters - ---------- - transform : function[ndarray[ndim=1] -> ndarray[ndim=1]] - Function to apply over each row group. - factor : ziplime.pipeline.Factor - The factor providing baseline data to transform. - mask : ziplime.pipeline.Filter - Mask of entries to ignore when calculating transforms. - groupby : ziplime.pipeline.Classifier - Classifier partitioning ``factor`` into groups to use when calculating - means. 
- transform_args : tuple[hashable] - Additional positional arguments to forward to ``transform``. - - Notes - ----- - Users should rarely construct instances of this factor directly. Instead, - they should construct instances via factor normalization methods like - ``zscore`` and ``demean`` or using ``rank`` with ``groupby``. - - See Also - -------- - ziplime.pipeline.Factor.zscore - ziplime.pipeline.Factor.demean - ziplime.pipeline.Factor.rank - """ - - window_length = 0 - - def __new__( - cls, - transform, - transform_args, - factor, - groupby, - dtype, - missing_value, - mask, - **kwargs, - ): - - if mask is None: - mask = factor.mask - else: - mask = mask & factor.mask - - if groupby is None: - groupby = Everything(mask=mask) - - return super(GroupedRowTransform, cls).__new__( - GroupedRowTransform, - transform=transform, - transform_args=transform_args, - inputs=(factor, groupby), - missing_value=missing_value, - mask=mask, - dtype=dtype, - **kwargs, - ) - - def _init(self, transform, transform_args, *args, **kwargs): - self._transform = transform - self._transform_args = transform_args - return super(GroupedRowTransform, self)._init(*args, **kwargs) - - @classmethod - def _static_identity(cls, transform, transform_args, *args, **kwargs): - return ( - super(GroupedRowTransform, cls)._static_identity(*args, **kwargs), - transform, - transform_args, - ) - - def _compute(self, arrays, dates, assets, mask): - data = arrays[0] - group_labels, null_label = self.inputs[1]._to_integral(arrays[1]) - # Make a copy with the null code written to masked locations. 
- group_labels = where(mask, group_labels, null_label) - return where( - group_labels != null_label, - naive_grouped_rowwise_apply( - data=data, - group_labels=group_labels, - func=self._transform, - func_args=self._transform_args, - out=empty_like(data, dtype=self.dtype), - ), - self.missing_value, - ) - - @property - def transform_name(self): - return self._transform.__name__ - - def graph_repr(self): - """Short repr to use when rendering Pipeline graphs.""" - return type(self).__name__ + "(%r)" % self.transform_name -class Rank(SingleInputMixin, Factor): - """ - A Factor representing the row-wise rank data of another Factor. - - Parameters - ---------- - factor : ziplime.pipeline.Factor - The factor on which to compute ranks. - method : str, {'average', 'min', 'max', 'dense', 'ordinal'} - The method used to assign ranks to tied elements. See - `scipy.stats.rankdata` for a full description of the semantics for each - ranking method. - - See Also - -------- - :func:`scipy.stats.rankdata` - :class:`Factor.rank` - - Notes - ----- - Most users should call Factor.rank rather than directly construct an - instance of this class. - """ - - window_length = 0 - dtype = float64_dtype - window_safe = True - - def __new__(cls, factor, method, ascending, mask): - return super(Rank, cls).__new__( - cls, - inputs=(factor,), - method=method, - ascending=ascending, - mask=mask, - ) - - def _init(self, method, ascending, *args, **kwargs): - self._method = method - self._ascending = ascending - return super(Rank, self)._init(*args, **kwargs) - - @classmethod - def _static_identity(cls, method, ascending, *args, **kwargs): - return ( - super(Rank, cls)._static_identity(*args, **kwargs), - method, - ascending, - ) - - def _validate(self): - """ - Verify that the stored rank method is valid. 
- """ - if self._method not in _RANK_METHODS: - raise UnknownRankMethod( - method=self._method, - choices=set(_RANK_METHODS), - ) - return super(Rank, self)._validate() - - def _compute(self, arrays, dates, assets, mask): - """ - For each row in the input, compute a like-shaped array of per-row - ranks. - """ - return masked_rankdata_2d( - arrays[0], - mask, - self.inputs[0].missing_value, - self._method, - self._ascending, - ) - def __repr__(self): - if self.mask is AssetExists(): - # Don't include mask in repr if it's the default. - mask_info = "" - else: - mask_info = ", mask={}".format(self.mask.recursive_repr()) - - return "{type}({input_}, method='{method}'{mask_info})".format( - type=type(self).__name__, - input_=self.inputs[0].recursive_repr(), - method=self._method, - mask_info=mask_info, - ) - - def graph_repr(self): - # Graphviz interprets `\l` as "divide label into lines, left-justified" - return "Rank:\\l method: {!r}\\l mask: {}\\l".format( - self._method, - type(self.mask).__name__, - ) - - -class CustomFactor(PositiveWindowLengthMixin, CustomTermMixin, Factor): - ''' - Base class for user-defined Factors. - - Parameters - ---------- - inputs : iterable, optional - An iterable of `BoundColumn` instances (e.g. USEquityPricing.close), - describing the data to load and pass to `self.compute`. If this - argument is not passed to the CustomFactor constructor, we look for a - class-level attribute named `inputs`. - outputs : iterable[str], optional - An iterable of strings which represent the names of each output this - factor should compute and return. If this argument is not passed to the - CustomFactor constructor, we look for a class-level attribute named - `outputs`. - window_length : int, optional - Number of rows to pass for each input. If this argument is not passed - to the CustomFactor constructor, we look for a class-level attribute - named `window_length`. 
- mask : ziplime.pipeline.Filter, optional - A Filter describing the assets on which we should compute each day. - Each call to ``CustomFactor.compute`` will only receive assets for - which ``mask`` produced True on the day for which compute is being - called. - - Notes - ----- - Users implementing their own Factors should subclass CustomFactor and - implement a method named `compute` with the following signature: - - .. code-block:: python - - def compute(self, today, assets, out, *inputs): - ... - - On each simulation date, ``compute`` will be called with the current date, - an array of sids, an output array, and an input array for each expression - passed as inputs to the CustomFactor constructor. - - The specific types of the values passed to `compute` are as follows:: - - today : np.datetime64[ns] - Row label for the last row of all arrays passed as `inputs`. - assets : np.array[int64, ndim=1] - Column labels for `out` and`inputs`. - out : np.array[self.dtype, ndim=1] - Output array of the same shape as `assets`. `compute` should write - its desired return values into `out`. If multiple outputs are - specified, `compute` should write its desired return values into - `out.` for each output name in `self.outputs`. - *inputs : tuple of np.array - Raw data arrays corresponding to the values of `self.inputs`. - - ``compute`` functions should expect to be passed NaN values for dates on - which no data was available for an asset. This may include dates on which - an asset did not yet exist. - - For example, if a CustomFactor requires 10 rows of close price data, and - asset A started trading on Monday June 2nd, 2014, then on Tuesday, June - 3rd, 2014, the column of input data for asset A will have 9 leading NaNs - for the preceding days on which data was not yet available. - - Examples - -------- - - A CustomFactor with pre-declared defaults: - - .. 
code-block:: python - - class TenDayRange(CustomFactor): - """ - Computes the difference between the highest high in the last 10 - days and the lowest low. - - Pre-declares high and low as default inputs and `window_length` as - 10. - """ - - inputs = [USEquityPricing.high, USEquityPricing.low] - window_length = 10 - - def compute(self, today, assets, out, highs, lows): - from numpy import nanmin, nanmax - - highest_highs = nanmax(highs, axis=0) - lowest_lows = nanmin(lows, axis=0) - out[:] = highest_highs - lowest_lows - - - # Doesn't require passing inputs or window_length because they're - # pre-declared as defaults for the TenDayRange class. - ten_day_range = TenDayRange() - - A CustomFactor without defaults: - - .. code-block:: python - - class MedianValue(CustomFactor): - """ - Computes the median value of an arbitrary single input over an - arbitrary window.. - - Does not declare any defaults, so values for `window_length` and - `inputs` must be passed explicitly on every construction. - """ - - def compute(self, today, assets, out, data): - from numpy import nanmedian - out[:] = data.nanmedian(data, axis=0) - - # Values for `inputs` and `window_length` must be passed explicitly to - # MedianValue. - median_close10 = MedianValue([USEquityPricing.close], window_length=10) - median_low15 = MedianValue([USEquityPricing.low], window_length=15) - - A CustomFactor with multiple outputs: - - .. code-block:: python - - class MultipleOutputs(CustomFactor): - inputs = [USEquityPricing.close] - outputs = ['alpha', 'beta'] - window_length = N - - def compute(self, today, assets, out, close): - computed_alpha, computed_beta = some_function(close) - out.alpha[:] = computed_alpha - out.beta[:] = computed_beta - - # Each output is returned as its own Factor upon instantiation. - alpha, beta = MultipleOutputs() - - # Equivalently, we can create a single factor instance and access each - # output as an attribute of that instance. 
- multiple_outputs = MultipleOutputs() - alpha = multiple_outputs.alpha - beta = multiple_outputs.beta - - Note: If a CustomFactor has multiple outputs, all outputs must have the - same dtype. For instance, in the example above, if alpha is a float then - beta must also be a float. - ''' - - dtype = float64_dtype - - def _validate(self): - try: - super(CustomFactor, self)._validate() - except UnsupportedDataType as exc: - if self.dtype in CLASSIFIER_DTYPES: - raise UnsupportedDataType( - typename=type(self).__name__, - dtype=self.dtype, - hint="Did you mean to create a CustomClassifier?", - ) from exc - elif self.dtype in FILTER_DTYPES: - raise UnsupportedDataType( - typename=type(self).__name__, - dtype=self.dtype, - hint="Did you mean to create a CustomFilter?", - ) from exc - raise - - def __getattribute__(self, name): - outputs = object.__getattribute__(self, "outputs") - if outputs is None: - return super(CustomFactor, self).__getattribute__(name) - elif name in outputs: - return RecarrayField(factor=self, attribute=name) - else: - try: - return super(CustomFactor, self).__getattribute__(name) - except AttributeError as exc: - raise AttributeError( - "Instance of {factor} has no output named {attr!r}. " - "Possible choices are: {choices}.".format( - factor=type(self).__name__, - attr=name, - choices=self.outputs, - ) - ) from exc - - def __iter__(self): - if self.outputs is None: - raise ValueError( - "{factor} does not have multiple outputs.".format( - factor=type(self).__name__, - ) - ) - return (RecarrayField(self, attr) for attr in self.outputs) - - -class RecarrayField(SingleInputMixin, Factor): - """ - A single field from a multi-output factor. 
- """ - - def __new__(cls, factor, attribute): - return super(RecarrayField, cls).__new__( - cls, - attribute=attribute, - inputs=[factor], - window_length=0, - mask=factor.mask, - dtype=factor.dtype, - missing_value=factor.missing_value, - window_safe=factor.window_safe, - ) - - def _init(self, attribute, *args, **kwargs): - self._attribute = attribute - return super(RecarrayField, self)._init(*args, **kwargs) - - @classmethod - def _static_identity(cls, attribute, *args, **kwargs): - return ( - super(RecarrayField, cls)._static_identity(*args, **kwargs), - attribute, - ) - - def _compute(self, windows, dates, assets, mask): - return windows[0][self._attribute] - - def graph_repr(self): - return "{}.{}".format(self.inputs[0].recursive_repr(), self._attribute) - - -class Latest(LatestMixin, CustomFactor): - """ - Factor producing the most recently-known value of `inputs[0]` on each day. - - The `.latest` attribute of DataSet columns returns an instance of this - Factor. - """ - - window_length = 1 - - def compute(self, today, assets, out, data): - out[:] = data[-1] - - -class DailySummary(SingleInputMixin, Factor): - """1D Factor that computes a summary statistic across all assets.""" - - ndim = 1 - window_length = 0 - params = ("func",) - - def __new__(cls, func, input_, mask, dtype): - # TODO: We should be able to support datetime64 as well, but that - # requires extra care for handling NaT. 
- if dtype != float64_dtype: - raise AssertionError( - "DailySummary only supports float64 dtype, got {}".format(dtype), - ) - - return super(DailySummary, cls).__new__( - cls, - inputs=[input_], - dtype=dtype, - missing_value=nan, - window_safe=input_.window_safe, - func=func, - mask=mask, - ) - - def _compute(self, arrays, dates, assets, mask): - func = self.params["func"] - - data = arrays[0] - data[~mask] = nan - if not isnan(self.inputs[0].missing_value): - data[data == self.inputs[0].missing_value] = nan - - return as_column(func(data, self.inputs[0].missing_value)) - - def __repr__(self): - return "{}.{}()".format( - self.inputs[0].recursive_repr(), - self.params["func"].__name__, - ) - - graph_repr = recursive_repr = __repr__ - - -# Functions to be passed to GroupedRowTransform. These aren't defined inline -# because the transformation function is part of the instance hash key. -def demean(row): - return row - nanmean(row) - - -def zscore(row): - with np.errstate(divide="ignore", invalid="ignore"): - return (row - nanmean(row)) / nanstd(row) - - -def winsorize(row, min_percentile, max_percentile): - """ - This implementation is based on scipy.stats.mstats.winsorize - """ - a = row.copy() - nan_count = isnan(row).sum() - nonnan_count = a.size - nan_count - - # NOTE: argsort() sorts nans to the end of the array. - idx = a.argsort() - - # Set values at indices below the min percentile to the value of the entry - # at the cutoff. - if min_percentile > 0: - lower_cutoff = int(min_percentile * nonnan_count) - a[idx[:lower_cutoff]] = a[idx[lower_cutoff]] - - # Set values at indices above the max percentile to the value of the entry - # at the cutoff. - if max_percentile < 1: - upper_cutoff = int(ceil(nonnan_count * max_percentile)) - # if max_percentile is close to 1, then upper_cutoff might not - # remove any values. 
- if upper_cutoff < nonnan_count: - start_of_nans = (-nan_count) if nan_count else None - a[idx[upper_cutoff:start_of_nans]] = a[idx[upper_cutoff - 1]] - - return a diff --git a/ziplime/pipeline/terms/factors/grouped_row_transform.py b/ziplime/pipeline/terms/factors/grouped_row_transform.py new file mode 100644 index 0000000000..70495c3a31 --- /dev/null +++ b/ziplime/pipeline/terms/factors/grouped_row_transform.py @@ -0,0 +1,113 @@ +from numpy import empty_like, where + +from ziplime.lib.normalize import naive_grouped_rowwise_apply +from ziplime.pipeline.terms.factors.factor import Factor +from ziplime.pipeline.terms.classifiers import Everything + + +class GroupedRowTransform(Factor): + """ + A Factor that transforms an input factor by applying a row-wise + shape-preserving transformation on classifier-defined groups of that + Factor. + + This is most often useful for normalization operators like ``zscore`` or + ``demean`` or for performing ranking using ``rank``. + + Parameters + ---------- + transform : function[ndarray[ndim=1] -> ndarray[ndim=1]] + Function to apply over each row group. + factor : ziplime.pipeline.Factor + The factor providing baseline data to transform. + mask : ziplime.pipeline.Filter + Mask of entries to ignore when calculating transforms. + groupby : ziplime.pipeline.Classifier + Classifier partitioning ``factor`` into groups to use when calculating + means. + transform_args : tuple[hashable] + Additional positional arguments to forward to ``transform``. + + Notes + ----- + Users should rarely construct instances of this factor directly. Instead, + they should construct instances via factor normalization methods like + ``zscore`` and ``demean`` or using ``rank`` with ``groupby``. 
+ + See Also + -------- + ziplime.pipeline.Factor.zscore + ziplime.pipeline.Factor.demean + ziplime.pipeline.Factor.rank + """ + + window_length = 0 + + def __new__( + cls, + transform, + transform_args, + factor, + groupby, + dtype, + missing_value, + mask, + **kwargs, + ): + + if mask is None: + mask = factor.mask + else: + mask = mask & factor.mask + + if groupby is None: + groupby = Everything(mask=mask) + + return super(GroupedRowTransform, cls).__new__( + GroupedRowTransform, + transform=transform, + transform_args=transform_args, + inputs=(factor, groupby), + missing_value=missing_value, + mask=mask, + dtype=dtype, + **kwargs, + ) + + def _init(self, transform, transform_args, *args, **kwargs): + self._transform = transform + self._transform_args = transform_args + return super(GroupedRowTransform, self)._init(*args, **kwargs) + + @classmethod + def _static_identity(cls, transform, transform_args, *args, **kwargs): + return ( + super(GroupedRowTransform, cls)._static_identity(*args, **kwargs), + transform, + transform_args, + ) + + def _compute(self, arrays, dates, assets, mask): + data = arrays[0] + group_labels, null_label = self.inputs[1]._to_integral(arrays[1]) + # Make a copy with the null code written to masked locations. 
+ group_labels = where(mask, group_labels, null_label) + return where( + group_labels != null_label, + naive_grouped_rowwise_apply( + data=data, + group_labels=group_labels, + func=self._transform, + func_args=self._transform_args, + out=empty_like(data, dtype=self.dtype), + ), + self.missing_value, + ) + + @property + def transform_name(self): + return self._transform.__name__ + + def graph_repr(self): + """Short repr to use when rendering Pipeline graphs.""" + return type(self).__name__ + "(%r)" % self.transform_name diff --git a/ziplime/pipeline/terms/factors/latest.py b/ziplime/pipeline/terms/factors/latest.py new file mode 100644 index 0000000000..feb0d48aa8 --- /dev/null +++ b/ziplime/pipeline/terms/factors/latest.py @@ -0,0 +1,17 @@ +from ziplime.pipeline.terms.factors.custom_factor import CustomFactor +from ziplime.pipeline.mixins import LatestMixin + + +class Latest(LatestMixin, CustomFactor): + """ + Factor producing the most recently-known value of `inputs[0]` on each day. + + The `.latest` attribute of DataSet columns returns an instance of this + Factor. + """ + + window_length = 1 + + def compute(self, today, assets, out, data): + out[:] = data[-1] + diff --git a/ziplime/pipeline/terms/factors/num_expr_factor.py b/ziplime/pipeline/terms/factors/num_expr_factor.py new file mode 100644 index 0000000000..d5a658b780 --- /dev/null +++ b/ziplime/pipeline/terms/factors/num_expr_factor.py @@ -0,0 +1,24 @@ +from ziplime.pipeline.terms.factors.factor import Factor +from ziplime.pipeline.expression import NumericalExpression + + +class NumExprFactor(NumericalExpression, Factor): + """ + Factor computed from a numexpr expression. + + Parameters + ---------- + expr : string + A string suitable for passing to numexpr. All variables in 'expr' + should be of the form "x_i", where i is the index of the corresponding + factor input in 'binds'. + binds : tuple + A tuple of factors to use as inputs. 
+ + Notes + ----- + NumExprFactors are constructed by numerical operators like `+` and `-`. + Users should rarely need to construct a NumExprFactor directly. + """ + + pass \ No newline at end of file diff --git a/ziplime/pipeline/terms/factors/rank.py b/ziplime/pipeline/terms/factors/rank.py new file mode 100644 index 0000000000..71a58a43ba --- /dev/null +++ b/ziplime/pipeline/terms/factors/rank.py @@ -0,0 +1,104 @@ +from ziplime.pipeline.terms.factors.factor import Factor +from ziplime.pipeline.terms.asset_exists import AssetExists +from ziplime.errors import UnknownRankMethod +from ziplime.lib.rank import masked_rankdata_2d +from ziplime.pipeline.mixins import SingleInputMixin + +from ziplime.utils.numpy_utils import float64_dtype + +_RANK_METHODS = frozenset(["average", "min", "max", "dense", "ordinal"]) + +class Rank(SingleInputMixin, Factor): + """ + A Factor representing the row-wise rank data of another Factor. + + Parameters + ---------- + factor : ziplime.pipeline.Factor + The factor on which to compute ranks. + method : str, {'average', 'min', 'max', 'dense', 'ordinal'} + The method used to assign ranks to tied elements. See + `scipy.stats.rankdata` for a full description of the semantics for each + ranking method. + + See Also + -------- + :func:`scipy.stats.rankdata` + :class:`Factor.rank` + + Notes + ----- + Most users should call Factor.rank rather than directly construct an + instance of this class. 
+ """ + + window_length = 0 + dtype = float64_dtype + window_safe = True + + def __new__(cls, factor, method, ascending, mask): + return super(Rank, cls).__new__( + cls, + inputs=(factor,), + method=method, + ascending=ascending, + mask=mask, + ) + + def _init(self, method, ascending, *args, **kwargs): + self._method = method + self._ascending = ascending + return super(Rank, self)._init(*args, **kwargs) + + @classmethod + def _static_identity(cls, method, ascending, *args, **kwargs): + return ( + super(Rank, cls)._static_identity(*args, **kwargs), + method, + ascending, + ) + + def _validate(self): + """ + Verify that the stored rank method is valid. + """ + if self._method not in _RANK_METHODS: + raise UnknownRankMethod( + method=self._method, + choices=set(_RANK_METHODS), + ) + return super(Rank, self)._validate() + + def _compute(self, arrays, dates, assets, mask): + """ + For each row in the input, compute a like-shaped array of per-row + ranks. + """ + return masked_rankdata_2d( + arrays[0], + mask, + self.inputs[0].missing_value, + self._method, + self._ascending, + ) + + def __repr__(self): + if self.mask is AssetExists(): + # Don't include mask in repr if it's the default. 
+ mask_info = "" + else: + mask_info = ", mask={}".format(self.mask.recursive_repr()) + + return "{type}({input_}, method='{method}'{mask_info})".format( + type=type(self).__name__, + input_=self.inputs[0].recursive_repr(), + method=self._method, + mask_info=mask_info, + ) + + def graph_repr(self): + # Graphviz interprets `\l` as "divide label into lines, left-justified" + return "Rank:\\l method: {!r}\\l mask: {}\\l".format( + self._method, + type(self.mask).__name__, + ) diff --git a/ziplime/pipeline/terms/factors/recarray_field.py b/ziplime/pipeline/terms/factors/recarray_field.py new file mode 100644 index 0000000000..42c0268224 --- /dev/null +++ b/ziplime/pipeline/terms/factors/recarray_field.py @@ -0,0 +1,37 @@ +from ziplime.pipeline.terms.factors.factor import Factor +from ziplime.pipeline.mixins import SingleInputMixin + + +class RecarrayField(SingleInputMixin, Factor): + """ + A single field from a multi-output factor. + """ + + def __new__(cls, factor, attribute): + return super(RecarrayField, cls).__new__( + cls, + attribute=attribute, + inputs=[factor], + window_length=0, + mask=factor.mask, + dtype=factor.dtype, + missing_value=factor.missing_value, + window_safe=factor.window_safe, + ) + + def _init(self, attribute, *args, **kwargs): + self._attribute = attribute + return super(RecarrayField, self)._init(*args, **kwargs) + + @classmethod + def _static_identity(cls, attribute, *args, **kwargs): + return ( + super(RecarrayField, cls)._static_identity(*args, **kwargs), + attribute, + ) + + def _compute(self, windows, dates, assets, mask): + return windows[0][self._attribute] + + def graph_repr(self): + return "{}.{}".format(self.inputs[0].recursive_repr(), self._attribute) diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/__init__.py b/ziplime/pipeline/terms/factors/statistical/__init__.py similarity index 100% rename from ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/__init__.py rename to 
ziplime/pipeline/terms/factors/statistical/__init__.py diff --git a/ziplime/pipeline/terms/factors/statistical/rolling_correlation.py b/ziplime/pipeline/terms/factors/statistical/rolling_correlation.py new file mode 100644 index 0000000000..b511709bdd --- /dev/null +++ b/ziplime/pipeline/terms/factors/statistical/rolling_correlation.py @@ -0,0 +1,21 @@ +import numpy as np + + +from ziplime.errors import IncompatibleTerms +from ziplime.pipeline.terms.factors import CustomFactor + + +class RollingCorrelation(CustomFactor): + + def __new__(cls, base_factor: np.float64 | np.float32, target: np.float64 | np.float32, correlation_length, + mask=None): + if target.ndim == 2 and base_factor.mask is not target.mask: + raise IncompatibleTerms(term_1=base_factor, term_2=target) + if correlation_length < 2: + raise ValueError("correlation_length must be greater than or equal to 2") + return super(RollingCorrelation, cls).__new__( + cls, + inputs=[base_factor, target], + window_length=correlation_length, + mask=mask, + ) \ No newline at end of file diff --git a/ziplime/pipeline/terms/factors/statistical/rolling_linear_regression.py b/ziplime/pipeline/terms/factors/statistical/rolling_linear_regression.py new file mode 100644 index 0000000000..789c746461 --- /dev/null +++ b/ziplime/pipeline/terms/factors/statistical/rolling_linear_regression.py @@ -0,0 +1,84 @@ +import numpy as np +from numpy import broadcast_arrays +from scipy.stats import ( + linregress, +) + +from ziplime.errors import IncompatibleTerms +from ziplime.pipeline.terms.factors import CustomFactor + + + + +class RollingLinearRegression(CustomFactor): + """ + A Factor that performs an ordinary least-squares regression predicting the + columns of a given Factor from either the columns of another + Factor/BoundColumn or a slice/single column of data. + + Parameters + ---------- + dependent : ziplime.pipeline.Factor + The factor whose columns are the predicted/dependent variable of each + regression with `independent`. 
+ independent : ziplime.pipeline.slice.Slice or ziplime.pipeline.Factor + The factor/slice whose columns are the predictor/independent variable + of each regression with `dependent`. If `independent` is a Factor, + regressions are computed asset-wise. + regression_length : int + Length of the lookback window over which to compute each regression. + mask : ziplime.pipeline.Filter, optional + A Filter describing which assets (columns) of `dependent` should be + regressed against `independent` each day. + + See Also + -------- + :func:`scipy.stats.linregress` + :meth:`Factor.linear_regression` + :class:`ziplime.pipeline.factors.RollingLinearRegressionOfReturns` + + Notes + ----- + Most users should call Factor.linear_regression rather than directly + construct an instance of this class. + """ + + outputs = ["alpha", "beta", "r_value", "p_value", "stderr"] + + def __new__(cls, dependent: np.float64 | np.int64, independent: np.float64 | np.int64, + regression_length, mask=None): + if independent.ndim == 2 and dependent.mask is not independent.mask: + raise IncompatibleTerms(term_1=dependent, term_2=independent) + if regression_length < 2: + raise ValueError("regression_length must be greater than or equal to 2") + return super(RollingLinearRegression, cls).__new__( + cls, + inputs=[dependent, independent], + window_length=regression_length, + mask=mask, + ) + + def compute(self, today, assets, out, dependent, independent): + alpha = out.alpha + beta = out.beta + r_value = out.r_value + p_value = out.p_value + stderr = out.stderr + + def regress(y, x): + regr_results = linregress(y=y, x=x) + # `linregress` returns its results in the following order: + # slope, intercept, r-value, p-value, stderr + alpha[i] = regr_results[1] + beta[i] = regr_results[0] + r_value[i] = regr_results[2] + p_value[i] = regr_results[3] + stderr[i] = regr_results[4] + + # If `independent` is a Slice or single column of data, broadcast it + # out to the same shape as `dependent`, then compute 
column-wise. This + # is efficient because each column of the broadcasted array only refers + # to a single memory location. + independent = broadcast_arrays(independent, dependent)[0] + for i in range(len(out)): + regress(y=dependent[:, i], x=independent[:, i]) diff --git a/ziplime/pipeline/terms/factors/statistical/rolling_linear_regression_of_returns.py b/ziplime/pipeline/terms/factors/statistical/rolling_linear_regression_of_returns.py new file mode 100644 index 0000000000..8f347ca0a8 --- /dev/null +++ b/ziplime/pipeline/terms/factors/statistical/rolling_linear_regression_of_returns.py @@ -0,0 +1,120 @@ +from ziplime.pipeline.terms.asset_exists import AssetExists +from ziplime.pipeline.terms.filters import SingleAsset + +from ziplime.pipeline.terms.factors.basic.returns import Returns +from ziplime.pipeline.terms.factors.statistical.rolling_linear_regression import RollingLinearRegression + + +class RollingLinearRegressionOfReturns(RollingLinearRegression): + """Perform an ordinary least-squares regression predicting the returns of all + other assets on the given asset. + + Parameters + ---------- + target : ziplime.assets.Asset + The asset to regress against all other assets. + returns_length : int >= 2 + Length of the lookback window over which to compute returns. Daily + returns require a window length of 2. + regression_length : int >= 1 + Length of the lookback window over which to compute each regression. + mask : ziplime.pipeline.Filter, optional + A Filter describing which assets should be regressed against the target + asset each day. + + Notes + ----- + Computing this factor over many assets can be time consuming. It is + recommended that a mask be used in order to limit the number of assets over + which regressions are computed. + + This factor is designed to return five outputs: + + - alpha, a factor that computes the intercepts of each regression. + - beta, a factor that computes the slopes of each regression. 
+ - r_value, a factor that computes the correlation coefficient of each + regression. + - p_value, a factor that computes, for each regression, the two-sided + p-value for a hypothesis test whose null hypothesis is that the slope is + zero. + - stderr, a factor that computes the standard error of the estimate of each + regression. + + For more help on factors with multiple outputs, see + :class:`ziplime.pipeline.CustomFactor`. + + Examples + -------- + Let the following be example 10-day returns for three different assets:: + + SPY MSFT FB + 2017-03-13 -.03 .03 .04 + 2017-03-14 -.02 -.03 .02 + 2017-03-15 -.01 .02 .01 + 2017-03-16 0 -.02 .01 + 2017-03-17 .01 .04 -.01 + 2017-03-20 .02 -.03 -.02 + 2017-03-21 .03 .01 -.02 + 2017-03-22 .04 -.02 -.02 + + Suppose we are interested in predicting each stock's returns from SPY's + over rolling 5-day look back windows. We can compute rolling regression + coefficients (alpha and beta) from 2017-03-17 to 2017-03-22 by doing:: + + regression_factor = RollingRegressionOfReturns( + target=sid(8554), + returns_length=10, + regression_length=5, + ) + alpha = regression_factor.alpha + beta = regression_factor.beta + + The result of computing ``alpha`` from 2017-03-17 to 2017-03-22 gives:: + + SPY MSFT FB + 2017-03-17 0 .011 .003 + 2017-03-20 0 -.004 .004 + 2017-03-21 0 .007 .006 + 2017-03-22 0 .002 .008 + + And the result of computing ``beta`` from 2017-03-17 to 2017-03-22 gives:: + + SPY MSFT FB + 2017-03-17 1 .3 -1.1 + 2017-03-20 1 .2 -1 + 2017-03-21 1 -.3 -1 + 2017-03-22 1 -.3 -.9 + + Note that SPY's column for alpha is all 0's and for beta is all 1's, as the + regression line of SPY with itself is simply the function y = x. + + To understand how each of the other values were calculated, take for + example MSFT's ``alpha`` and ``beta`` values on 2017-03-17 (.011 and .3, + respectively). 
These values are the result of running a linear regression + predicting MSFT's returns from SPY's returns, using values starting at + 2017-03-17 and looking back 5 days. That is, the regression was run with + x = [-.03, -.02, -.01, 0, .01] and y = [.03, -.03, .02, -.02, .04], and it + produced a slope of .3 and an intercept of .011. + + See Also + -------- + :class:`ziplime.pipeline.factors.RollingPearsonOfReturns` + :class:`ziplime.pipeline.factors.RollingSpearmanOfReturns` + """ + + window_safe = True + + def __new__(cls, target, returns_length, regression_length, mask=None): + # Use the `SingleAsset` filter here because it protects against + # inputting a non-existent target asset. + returns = Returns( + window_length=returns_length, + mask=(AssetExists() | SingleAsset(asset=target)), + ) + return super(RollingLinearRegressionOfReturns, cls).__new__( + cls, + dependent=returns, + independent=returns[target], + regression_length=regression_length, + mask=mask, + ) diff --git a/ziplime/pipeline/terms/factors/statistical/rolling_pearson.py b/ziplime/pipeline/terms/factors/statistical/rolling_pearson.py new file mode 100644 index 0000000000..d0b1adcb06 --- /dev/null +++ b/ziplime/pipeline/terms/factors/statistical/rolling_pearson.py @@ -0,0 +1,48 @@ +from ziplime.pipeline.terms.factors.statistical.rolling_correlation import RollingCorrelation +from ziplime.pipeline.terms.factors.statistical import vectorized_pearson_r + + +class RollingPearson(RollingCorrelation): + """ + A Factor that computes pearson correlation coefficients between the columns + of a given Factor and either the columns of another Factor/BoundColumn or a + slice/single column of data. + + Parameters + ---------- + base_factor : ziplime.pipeline.Factor + The factor for which to compute correlations of each of its columns + with `target`. + target : ziplime.pipeline.Term with a numeric dtype + The term with which to compute correlations against each column of data + produced by `base_factor`. 
This term may be a Factor, a BoundColumn or + a Slice. If `target` is two-dimensional, correlations are computed + asset-wise. + correlation_length : int + Length of the lookback window over which to compute each correlation + coefficient. + mask : ziplime.pipeline.Filter, optional + A Filter describing which assets (columns) of `base_factor` should have + their correlation with `target` computed each day. + + See Also + -------- + :func:`scipy.stats.pearsonr` + :meth:`Factor.pearsonr` + :class:`ziplime.pipeline.factors.RollingPearsonOfReturns` + + Notes + ----- + Most users should call Factor.pearsonr rather than directly construct an + instance of this class. + """ + + window_safe = True + + def compute(self, today, assets, out, base_data, target_data): + vectorized_pearson_r( + base_data, + target_data, + allowed_missing=0, + out=out, + ) diff --git a/ziplime/pipeline/terms/factors/statistical/rolling_pearson_of_returns.py b/ziplime/pipeline/terms/factors/statistical/rolling_pearson_of_returns.py new file mode 100644 index 0000000000..2396d3e0c3 --- /dev/null +++ b/ziplime/pipeline/terms/factors/statistical/rolling_pearson_of_returns.py @@ -0,0 +1,95 @@ +from ziplime.pipeline.terms.asset_exists import AssetExists +from ziplime.pipeline.terms.factors import RollingPearson, Returns +from ziplime.pipeline.terms.filters import SingleAsset + + + + +class RollingPearsonOfReturns(RollingPearson): + """ + Calculates the Pearson product-moment correlation coefficient of the + returns of the given asset with the returns of all other assets. + + Pearson correlation is what most people mean when they say "correlation + coefficient" or "R-value". + + Parameters + ---------- + target : ziplime.assets.Asset + The asset to correlate with all other assets. + returns_length : int >= 2 + Length of the lookback window over which to compute returns. Daily + returns require a window length of 2. 
class RollingPearsonOfReturns(RollingPearson):
    """Pearson product-moment correlation coefficient ("R-value") of every
    asset's returns against the returns of a single target asset.

    Parameters
    ----------
    target : ziplime.assets.Asset
        The asset whose returns are correlated with all other assets.
    returns_length : int >= 2
        Length of the lookback window over which returns are computed; a
        window length of 2 gives daily returns.
    correlation_length : int >= 1
        Length of the lookback window over which each correlation
        coefficient is computed.
    mask : ziplime.pipeline.Filter, optional
        Filter restricting which assets have their correlation with the
        target computed each day.

    Notes
    -----
    Computing this factor over a large universe is expensive; supplying a
    ``mask`` is recommended. The output column for ``target`` itself is
    always 1, since any series is perfectly correlated with itself.

    See Also
    --------
    :class:`ziplime.pipeline.factors.RollingSpearmanOfReturns`
    :class:`ziplime.pipeline.factors.RollingLinearRegressionOfReturns`
    """

    def __new__(cls, target, returns_length, correlation_length, mask=None):
        # `SingleAsset` guards against a target asset that never exists in
        # the queried universe.
        exists = AssetExists() | SingleAsset(asset=target)
        asset_returns = Returns(window_length=returns_length, mask=exists)
        return super(RollingPearsonOfReturns, cls).__new__(
            cls,
            base_factor=asset_returns,
            target=asset_returns[target],
            correlation_length=correlation_length,
            mask=mask,
        )
+ mask : ziplime.pipeline.Filter, optional + A Filter describing which assets (columns) of `base_factor` should have + their correlation with `target` computed each day. + + See Also + -------- + :func:`scipy.stats.spearmanr` + :meth:`Factor.spearmanr` + :class:`ziplime.pipeline.factors.RollingSpearmanOfReturns` + + Notes + ----- + Most users should call Factor.spearmanr rather than directly construct an + instance of this class. + """ + + window_safe = True + + def compute(self, today, assets, out, base_data, target_data): + # If `target_data` is a Slice or single column of data, broadcast it + # out to the same shape as `base_data`, then compute column-wise. This + # is efficient because each column of the broadcasted array only refers + # to a single memory location. + target_data = broadcast_arrays(target_data, base_data)[0] + for i in range(len(out)): + out[i] = spearmanr(base_data[:, i], target_data[:, i])[0] diff --git a/ziplime/pipeline/terms/factors/statistical/rolling_spearman_of_returns.py b/ziplime/pipeline/terms/factors/statistical/rolling_spearman_of_returns.py new file mode 100644 index 0000000000..227d24fae5 --- /dev/null +++ b/ziplime/pipeline/terms/factors/statistical/rolling_spearman_of_returns.py @@ -0,0 +1,50 @@ +from ziplime.pipeline.terms.asset_exists import AssetExists +from ziplime.pipeline.terms.factors import RollingSpearman, Returns +from ziplime.pipeline.terms.filters import SingleAsset + + +class RollingSpearmanOfReturns(RollingSpearman): + """ + Calculates the Spearman rank correlation coefficient of the returns of the + given asset with the returns of all other assets. + + Parameters + ---------- + target : ziplime.assets.Asset + The asset to correlate with all other assets. + returns_length : int >= 2 + Length of the lookback window over which to compute returns. Daily + returns require a window length of 2. + correlation_length : int >= 1 + Length of the lookback window over which to compute each correlation + coefficient. 
+ mask : ziplime.pipeline.Filter, optional + A Filter describing which assets should have their correlation with the + target asset computed each day. + + Notes + ----- + Computing this factor over many assets can be time consuming. It is + recommended that a mask be used in order to limit the number of assets over + which correlations are computed. + + See Also + -------- + :class:`ziplime.pipeline.factors.RollingPearsonOfReturns` + :class:`ziplime.pipeline.factors.RollingLinearRegressionOfReturns` + """ + + def __new__(cls, target, returns_length, correlation_length, mask=None): + # Use the `SingleAsset` filter here because it protects against + # inputting a non-existent target asset. + returns = Returns( + window_length=returns_length, + mask=(AssetExists() | SingleAsset(asset=target)), + ) + return super(RollingSpearmanOfReturns, cls).__new__( + cls, + base_factor=returns, + target=returns[target], + correlation_length=correlation_length, + mask=mask, + ) diff --git a/ziplime/pipeline/terms/factors/statistical/simple_beta.py b/ziplime/pipeline/terms/factors/statistical/simple_beta.py new file mode 100644 index 0000000000..3aa19375f6 --- /dev/null +++ b/ziplime/pipeline/terms/factors/statistical/simple_beta.py @@ -0,0 +1,84 @@ +from ziplime.pipeline.terms.asset_exists import AssetExists +from ziplime.pipeline.terms.factors import CustomFactor, Returns +from ziplime.pipeline.terms.factors.statistical.vectorized_beta import vectorized_beta +from ziplime.pipeline.terms.filters import SingleAsset +from ziplime.pipeline.mixins import StandardOutputs +from ziplime.utils.numpy_utils import ( + float64_dtype, +) + +from ziplime.assets.entities.asset import Asset + + + + + + +class SimpleBeta(CustomFactor, StandardOutputs): + """Factor producing the slope of a regression line between each asset's daily + returns to the daily returns of a single "target" asset. + + Parameters + ---------- + target : ziplime.Asset + Asset against which other assets should be regressed. 
class SimpleBeta(CustomFactor, StandardOutputs):
    """Factor producing the slope of a regression line between each asset's
    daily returns and the daily returns of a single "target" asset.

    Parameters
    ----------
    target : ziplime.Asset
        Asset against which other assets should be regressed.
    regression_length : int
        Number of days of daily returns to use for the regression. Must be
        at least 3.
    allowed_missing_percentage : float, optional
        Fraction of returns observations (between 0 and 1) that are allowed
        to be missing when calculating betas. Assets with more than this
        fraction of returns observations missing produce NaN. Default is
        0.25 (25% of inputs may be missing).
    """

    window_safe = True
    dtype = float64_dtype
    params = ("allowed_missing_count",)

    def __new__(cls, target: Asset, regression_length: int, allowed_missing_percentage: int | float = 0.25):
        if regression_length < 3:
            raise ValueError("regression_length must be greater than or equal to 3")
        if allowed_missing_percentage <= 0.0 or allowed_missing_percentage > 1.0:
            raise ValueError("allowed_missing_percentage must be between 0.0 and 1.0")
        daily_returns = Returns(
            window_length=2,
            mask=(AssetExists() | SingleAsset(asset=target)),
        )
        # Translate the fractional tolerance into an absolute observation
        # count over the regression window (truncated, not rounded).
        allowed_missing_count = int(allowed_missing_percentage * regression_length)
        return super(SimpleBeta, cls).__new__(
            cls,
            inputs=[daily_returns, daily_returns[target]],
            window_length=regression_length,
            allowed_missing_count=allowed_missing_count,
        )

    def compute(
        self, today, assets, out, all_returns, target_returns, allowed_missing_count
    ):
        vectorized_beta(
            dependents=all_returns,
            independent=target_returns,
            allowed_missing=allowed_missing_count,
            out=out,
        )

    def graph_repr(self):
        """Short label used in pipeline graph visualizations."""
        return "{}({!r}, {}, {})".format(
            type(self).__name__,
            str(self.target.symbol),
            self.window_length,
            self.params["allowed_missing_count"],
        )

    @property
    def target(self):
        """Get the target of the beta calculation."""
        return self.inputs[1].asset

    def __repr__(self):
        return "{}({}, length={}, allowed_missing={})".format(
            type(self).__name__,
            self.target,
            self.window_length,
            self.params["allowed_missing_count"],
        )
def vectorized_beta(dependents, independent, allowed_missing, out=None):
    """Compute slopes of linear regressions between columns of ``dependents`` and
    ``independent``.

    Parameters
    ----------
    dependents : np.array[N, M]
        Array with columns of data to be regressed against ``independent``.
    independent : np.array[N, 1]
        Independent variable of the regression
    allowed_missing : int
        Number of allowed missing (NaN) observations per column. Columns
        with more than this many missing (NaN) observations in either
        ``dependents`` or ``independent`` will output NaN as the regression
        coefficient.
    out : np.array[M] or None, optional
        Output array into which to write results. If None, a new array is
        created and returned.

    Returns
    -------
    slopes : np.array[M]
        Linear regression coefficients for each column of ``dependents``.
    """
    # Cache these as locals since we're going to call them multiple times.
    nan = np.nan
    isnan = np.isnan
    N, M = dependents.shape

    if out is None:
        out = np.full(M, nan)

    # Copy N times as a column vector and fill with nans to have the same
    # missing value pattern as the dependent variable.
    #
    # PERF_TODO: We could probably avoid the space blowup by doing this in
    # Cython.

    # shape: (N, M)
    independent = np.where(
        isnan(dependents),
        nan,
        independent,
    )

    # Calculate beta as Cov(X, Y) / Cov(X, X).
    # https://en.wikipedia.org/wiki/Simple_linear_regression#Fitting_the_regression_line  # noqa
    #
    # NOTE: The usual formula for covariance is::
    #
    #    mean((X - mean(X)) * (Y - mean(Y)))
    #
    # However, we don't actually need to take the mean of both sides of the
    # product, because of the following equivalence::
    #
    # Let X_res = (X - mean(X)).
    # We have:
    #
    #     mean(X_res * (Y - mean(Y))) = mean(X_res * (Y - mean(Y)))
    #                             (1) = mean((X_res * Y) - (X_res * mean(Y)))
    #                             (2) = mean(X_res * Y) - mean(X_res * mean(Y))
    #                             (3) = mean(X_res * Y) - mean(X_res) * mean(Y)
    #                             (4) = mean(X_res * Y) - 0 * mean(Y)
    #                             (5) = mean(X_res * Y)
    #
    # The tricky step in the above derivation is step (4). We know that
    # mean(X_res) is zero because, for any X:
    #
    #     mean(X - mean(X)) = mean(X) - mean(X) = 0.
    #
    # The upshot of this is that we only have to center one of `independent`
    # and `dependent` when calculating covariances. Since we need the centered
    # `independent` to calculate its variance in the next step, we choose to
    # center `independent`.

    # shape: (N, M)
    ind_residual = independent - nanmean(independent, axis=0)

    # shape: (M,)
    covariances = nanmean(ind_residual * dependents, axis=0)

    # We end up with different variances in each column here because each
    # column may have a different subset of the data dropped due to missing
    # data in the corresponding dependent column.
    # shape: (M,)
    independent_variances = nanmean(ind_residual ** 2, axis=0)

    # shape: (M,)
    np.divide(covariances, independent_variances, out=out)

    # Write nans back to locations where we have more than the allowed
    # number of missing entries.
    nanlocs = isnan(independent).sum(axis=0) > allowed_missing
    out[nanlocs] = nan

    return out
+ either_nan = isnan(dependents) | isnan(independents) + independents = np.where(either_nan, nan, independents) + dependents = np.where(either_nan, nan, dependents) + mean = nanmean + else: + # Otherwise, we can just use mean, which will give us a nan for any + # column where there's ever a nan. + mean = np.mean + + # Pearson R is Cov(X, Y) / StdDev(X) * StdDev(Y) + # c.f. https://en.wikipedia.org/wiki/Pearson_correlation_coefficient + ind_residual = independents - mean(independents, axis=0) + dep_residual = dependents - mean(dependents, axis=0) + + ind_variance = mean(ind_residual ** 2, axis=0) + dep_variance = mean(dep_residual ** 2, axis=0) + + covariances = mean(ind_residual * dep_residual, axis=0) + + evaluate( + "where(mask, nan, cov / sqrt(ind_variance * dep_variance))", + local_dict={ + "cov": covariances, + "mask": isnan(independents).sum(axis=0) > allowed_missing, + "nan": np.nan, + "ind_variance": ind_variance, + "dep_variance": dep_variance, + }, + global_dict={}, + out=out, + ) + return out diff --git a/ziplime/pipeline/terms/factors/technical/__init__.py b/ziplime/pipeline/terms/factors/technical/__init__.py new file mode 100644 index 0000000000..ab6ad0a5d0 --- /dev/null +++ b/ziplime/pipeline/terms/factors/technical/__init__.py @@ -0,0 +1,2 @@ +class BollingerBands: + pass \ No newline at end of file diff --git a/ziplime/pipeline/terms/factors/technical/aroon.py b/ziplime/pipeline/terms/factors/technical/aroon.py new file mode 100644 index 0000000000..897421bbe0 --- /dev/null +++ b/ziplime/pipeline/terms/factors/technical/aroon.py @@ -0,0 +1,45 @@ +from numexpr import evaluate + +from ziplime.pipeline.data import EquityPricing +from ziplime.pipeline.terms.factors import CustomFactor +from ziplime.utils.math_utils import nanargmax, nanargmin + + +class Aroon(CustomFactor): + """ + Aroon technical indicator. 
+ https://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/aroon-indicator + + **Defaults Inputs:** :data:`ziplime.pipeline.data.EquityPricing.low`, \ + :data:`ziplime.pipeline.data.EquityPricing.high` + + Parameters + ---------- + window_length : int > 0 + Length of the lookback window over which to compute the Aroon + indicator. + """ # noqa + + inputs = (EquityPricing.low, EquityPricing.high) + outputs = ("down", "up") + + def compute(self, today, assets, out, lows, highs): + wl = self.window_length + high_date_index = nanargmax(highs, axis=0) + low_date_index = nanargmin(lows, axis=0) + evaluate( + "(100 * high_date_index) / (wl - 1)", + local_dict={ + "high_date_index": high_date_index, + "wl": wl, + }, + out=out.up, + ) + evaluate( + "(100 * low_date_index) / (wl - 1)", + local_dict={ + "low_date_index": low_date_index, + "wl": wl, + }, + out=out.down, + ) diff --git a/ziplime/pipeline/terms/factors/technical/bollinger_bands.py b/ziplime/pipeline/terms/factors/technical/bollinger_bands.py new file mode 100644 index 0000000000..529fb72c17 --- /dev/null +++ b/ziplime/pipeline/terms/factors/technical/bollinger_bands.py @@ -0,0 +1,33 @@ +from ziplime.pipeline.data import EquityPricing +from ziplime.pipeline.terms.factors import CustomFactor +from ziplime.utils.math_utils import nanmean, nanstd + + +class BollingerBands(CustomFactor): + """ + Bollinger Bands technical indicator. + https://en.wikipedia.org/wiki/Bollinger_Bands + + **Default Inputs:** :data:`ziplime.pipeline.data.EquityPricing.close` + + Parameters + ---------- + inputs : length-1 iterable[BoundColumn] + The expression over which to compute bollinger bands. + window_length : int > 0 + Length of the lookback window over which to compute the bollinger + bands. + k : float + The number of standard deviations to add or subtract to create the + upper and lower bands. 
+ """ + + params = ("k",) + inputs = (EquityPricing.close,) + outputs = "lower", "middle", "upper" + + def compute(self, today, assets, out, close, k): + difference = k * nanstd(close, axis=0) + out.middle = middle = nanmean(close, axis=0) + out.upper = middle + difference + out.lower = middle - difference diff --git a/ziplime/pipeline/terms/factors/technical/fast_stochastic_oscillator.py b/ziplime/pipeline/terms/factors/technical/fast_stochastic_oscillator.py new file mode 100644 index 0000000000..a29915d6ef --- /dev/null +++ b/ziplime/pipeline/terms/factors/technical/fast_stochastic_oscillator.py @@ -0,0 +1,48 @@ +from numexpr import evaluate + +from ziplime.pipeline.data import EquityPricing +from ziplime.pipeline.terms.factors import CustomFactor +from ziplime.utils.math_utils import nanmax, nanmin + + +class FastStochasticOscillator(CustomFactor): + """ + Fast Stochastic Oscillator Indicator [%K, Momentum Indicator] + https://wiki.timetotrade.eu/Stochastic + + This stochastic is considered volatile, and varies a lot when used in + market analysis. It is recommended to use the slow stochastic oscillator + or a moving average of the %K [%D]. 
+ + **Default Inputs:** :data:`ziplime.pipeline.data.EquityPricing.close`, \ + :data:`ziplime.pipeline.data.EquityPricing.low`, \ + :data:`ziplime.pipeline.data.EquityPricing.high` + + **Default Window Length:** 14 + + Returns + ------- + out: %K oscillator + """ + + inputs = (EquityPricing.close, EquityPricing.low, EquityPricing.high) + window_safe = True + window_length = 14 + + def compute(self, today, assets, out, closes, lows, highs): + + highest_highs = nanmax(highs, axis=0) + lowest_lows = nanmin(lows, axis=0) + today_closes = closes[-1] + + evaluate( + "((tc - ll) / (hh - ll)) * 100", + local_dict={ + "tc": today_closes, + "ll": lowest_lows, + "hh": highest_highs, + }, + global_dict={}, + out=out, + ) + diff --git a/ziplime/pipeline/terms/factors/technical/ichimoku_kinko_hyo.py b/ziplime/pipeline/terms/factors/technical/ichimoku_kinko_hyo.py new file mode 100644 index 0000000000..917fb468eb --- /dev/null +++ b/ziplime/pipeline/terms/factors/technical/ichimoku_kinko_hyo.py @@ -0,0 +1,76 @@ +from ziplime.pipeline.data import EquityPricing +from ziplime.pipeline.terms.factors import CustomFactor + + +class IchimokuKinkoHyo(CustomFactor): + """Compute the various metrics for the Ichimoku Kinko Hyo (Ichimoku Cloud). + http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:ichimoku_cloud + + **Default Inputs:** :data:`ziplime.pipeline.data.EquityPricing.high`, \ + :data:`ziplime.pipeline.data.EquityPricing.low`, \ + :data:`ziplime.pipeline.data.EquityPricing.close` + + **Default Window Length:** 52 + + Parameters + ---------- + window_length : int > 0 + The length the the window for the senkou span b. + tenkan_sen_length : int >= 0, <= window_length + The length of the window for the tenkan-sen. + kijun_sen_length : int >= 0, <= window_length + The length of the window for the kijou-sen. + chikou_span_length : int >= 0, <= window_length + The lag for the chikou span. 
+ """ # noqa + + params = { + "tenkan_sen_length": 9, + "kijun_sen_length": 26, + "chikou_span_length": 26, + } + inputs = (EquityPricing.high, EquityPricing.low, EquityPricing.close) + outputs = ( + "tenkan_sen", + "kijun_sen", + "senkou_span_a", + "senkou_span_b", + "chikou_span", + ) + window_length = 52 + + def _validate(self): + super(IchimokuKinkoHyo, self)._validate() + for k, v in self.params.items(): + if v > self.window_length: + raise ValueError( + "%s must be <= the window_length: %s > %s" + % ( + k, + v, + self.window_length, + ), + ) + + def compute( + self, + today, + assets, + out, + high, + low, + close, + tenkan_sen_length, + kijun_sen_length, + chikou_span_length, + ): + + out.tenkan_sen = tenkan_sen = ( + high[-tenkan_sen_length:].max(axis=0) + low[-tenkan_sen_length:].min(axis=0) + ) / 2 + out.kijun_sen = kijun_sen = ( + high[-kijun_sen_length:].max(axis=0) + low[-kijun_sen_length:].min(axis=0) + ) / 2 + out.senkou_span_a = (tenkan_sen + kijun_sen) / 2 + out.senkou_span_b = (high.max(axis=0) + low.min(axis=0)) / 2 + out.chikou_span = close[chikou_span_length] diff --git a/ziplime/pipeline/terms/factors/technical/moving_average_convergence_divergence_signal.py b/ziplime/pipeline/terms/factors/technical/moving_average_convergence_divergence_signal.py new file mode 100644 index 0000000000..c8ed93e9ec --- /dev/null +++ b/ziplime/pipeline/terms/factors/technical/moving_average_convergence_divergence_signal.py @@ -0,0 +1,82 @@ +from numpy import average + +from ziplime.pipeline.data import EquityPricing +from ziplime.pipeline.terms.factors import CustomFactor +from ziplime.pipeline.terms.factors.utils.exponential_weights import exponential_weights +from ziplime.utils.numpy_utils import rolling_window + + +class MovingAverageConvergenceDivergenceSignal(CustomFactor): + """ + Moving Average Convergence/Divergence (MACD) Signal line + https://en.wikipedia.org/wiki/MACD + + A technical indicator originally developed by Gerald Appel in the late + 1970's. 
MACD shows the relationship between two moving averages and + reveals changes in the strength, direction, momentum, and duration of a + trend in a stock's price. + + **Default Inputs:** :data:`ziplime.pipeline.data.EquityPricing.close` + + Parameters + ---------- + fast_period : int > 0, optional + The window length for the "fast" EWMA. Default is 12. + slow_period : int > 0, > fast_period, optional + The window length for the "slow" EWMA. Default is 26. + signal_period : int > 0, < fast_period, optional + The window length for the signal line. Default is 9. + + Notes + ----- + Unlike most pipeline expressions, this factor does not accept a + ``window_length`` parameter. ``window_length`` is inferred from + ``slow_period`` and ``signal_period``. + """ + + inputs = (EquityPricing.close,) + # We don't use the default form of `params` here because we want to + # dynamically calculate `window_length` from the period lengths in our + # __new__. + params = ("fast_period", "slow_period", "signal_period") + + def __new__(cls, fast_period=12, slow_period=26, signal_period=9, *args, **kwargs): + if fast_period < 1: + raise ValueError("`fast_period` must be >= 1") + if slow_period < 1: + raise ValueError("`slow_period` must be >= 1") + if signal_period < 1: + raise ValueError("`signal_period` must be >= 1") + + if slow_period <= fast_period: + raise ValueError( + "'slow_period' must be greater than 'fast_period', but got\n" + "slow_period={slow}, fast_period={fast}".format( + slow=slow_period, + fast=fast_period, + ) + ) + + return super(MovingAverageConvergenceDivergenceSignal, cls).__new__( + cls, + fast_period=fast_period, + slow_period=slow_period, + signal_period=signal_period, + window_length=slow_period + signal_period - 1, + *args, + **kwargs, + ) + + def _ewma(self, data, length): + decay_rate = 1.0 - (2.0 / (1.0 + length)) + return average(data, axis=1, weights=exponential_weights(length, decay_rate)) + + def compute( + self, today, assets, out, close, fast_period, 
slow_period, signal_period + ): + slow_EWMA = self._ewma(rolling_window(close, slow_period), slow_period) + fast_EWMA = self._ewma( + rolling_window(close, fast_period)[-signal_period:], fast_period + ) + macd = fast_EWMA - slow_EWMA + out[:] = self._ewma(macd.T, signal_period) diff --git a/ziplime/pipeline/terms/factors/technical/rate_of_change_percentage.py b/ziplime/pipeline/terms/factors/technical/rate_of_change_percentage.py new file mode 100644 index 0000000000..d8223e9ee0 --- /dev/null +++ b/ziplime/pipeline/terms/factors/technical/rate_of_change_percentage.py @@ -0,0 +1,25 @@ +from numexpr import evaluate + +from ziplime.pipeline.terms.factors import CustomFactor + + +class RateOfChangePercentage(CustomFactor): + """ + Rate of change Percentage + ROC measures the percentage change in price from one period to the next. + The ROC calculation compares the current price with the price `n` + periods ago. + Formula for calculation: ((price - prevPrice) / prevPrice) * 100 + price - the current price + prevPrice - the price n days ago, equals window length + """ + + def compute(self, today, assets, out, close): + today_close = close[-1] + prev_close = close[0] + evaluate( + "((tc - pc) / pc) * 100", + local_dict={"tc": today_close, "pc": prev_close}, + global_dict={}, + out=out, + ) diff --git a/ziplime/pipeline/terms/factors/technical/rsi.py b/ziplime/pipeline/terms/factors/technical/rsi.py new file mode 100644 index 0000000000..df0f9f432b --- /dev/null +++ b/ziplime/pipeline/terms/factors/technical/rsi.py @@ -0,0 +1,37 @@ +from numpy import ( + abs, + clip, + diff, + inf, +) +from numexpr import evaluate + +from ziplime.pipeline.data import EquityPricing +from ziplime.pipeline.terms.factors import CustomFactor +from ziplime.pipeline.mixins import SingleInputMixin +from ziplime.utils.math_utils import nanmean + + +class RSI(SingleInputMixin, CustomFactor): + """ + Relative Strength Index + + **Default Inputs**: :data:`ziplime.pipeline.data.EquityPricing.close` + + 
**Default Window Length**: 15 + """ + + window_length = 15 + inputs = (EquityPricing.close,) + window_safe = True + + def compute(self, today, assets, out, closes): + diffs = diff(closes, axis=0) + ups = nanmean(clip(diffs, 0, inf), axis=0) + downs = abs(nanmean(clip(diffs, -inf, 0), axis=0)) + return evaluate( + "100 - (100 / (1 + (ups / downs)))", + local_dict={"ups": ups, "downs": downs}, + global_dict={}, + out=out, + ) diff --git a/ziplime/pipeline/terms/factors/technical/true_range.py b/ziplime/pipeline/terms/factors/technical/true_range.py new file mode 100644 index 0000000000..029cea5be5 --- /dev/null +++ b/ziplime/pipeline/terms/factors/technical/true_range.py @@ -0,0 +1,42 @@ +from numpy import abs, dstack + +from ziplime.pipeline.data import EquityPricing +from ziplime.pipeline.terms.factors import CustomFactor +from ziplime.utils.math_utils import nanmax + + +class TrueRange(CustomFactor): + """ + True Range + + A technical indicator originally developed by J. Welles Wilder, Jr. + Indicates the true degree of daily price change in an underlying. 
+ + **Default Inputs:** :data:`ziplime.pipeline.data.EquityPricing.high`, \ + :data:`ziplime.pipeline.data.EquityPricing.low`, \ + :data:`ziplime.pipeline.data.EquityPricing.close` + + **Default Window Length:** 2 + """ + + inputs = ( + EquityPricing.high, + EquityPricing.low, + EquityPricing.close, + ) + window_length = 2 + + def compute(self, today, assets, out, highs, lows, closes): + high_to_low = highs[1:] - lows[1:] + high_to_prev_close = abs(highs[1:] - closes[:-1]) + low_to_prev_close = abs(lows[1:] - closes[:-1]) + out[:] = nanmax( + dstack( + ( + high_to_low, + high_to_prev_close, + low_to_prev_close, + ) + ), + 2, + ) diff --git a/ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/accounts/__init__.py b/ziplime/pipeline/terms/factors/utils/__init__.py similarity index 100% rename from ziplime/data/data_sources/grpc/grpc_stubs/grpc/tradeapi/v1/accounts/__init__.py rename to ziplime/pipeline/terms/factors/utils/__init__.py diff --git a/ziplime/pipeline/terms/factors/utils/exponential_weights.py b/ziplime/pipeline/terms/factors/utils/exponential_weights.py new file mode 100644 index 0000000000..6da877122e --- /dev/null +++ b/ziplime/pipeline/terms/factors/utils/exponential_weights.py @@ -0,0 +1,25 @@ +from numpy import arange, full + +from ziplime.utils.numpy_utils import float64_dtype + + +def exponential_weights(length, decay_rate): + """ + Build a weight vector for an exponentially-weighted statistic. + + The resulting ndarray is of the form:: + + [decay_rate ** length, ..., decay_rate ** 2, decay_rate] + + Parameters + ---------- + length : int + The length of the desired weight vector. + decay_rate : float + The rate at which entries in the weight vector increase or decrease. 
+ + Returns + ------- + weights : ndarray[float64] + """ + return full(length, decay_rate, float64_dtype) ** arange(length + 1, 1, -1) diff --git a/ziplime/pipeline/terms/factors/utils/grouped_row_transform_utils.py b/ziplime/pipeline/terms/factors/utils/grouped_row_transform_utils.py new file mode 100644 index 0000000000..b5ceee5038 --- /dev/null +++ b/ziplime/pipeline/terms/factors/utils/grouped_row_transform_utils.py @@ -0,0 +1,46 @@ +import numpy as np +from math import ceil + +from numpy import isnan + +from ziplime.utils.math_utils import nanmean, nanstd + +# Functions to be passed to GroupedRowTransform. These aren't defined inline +# because the transformation function is part of the instance hash key. +def demean(row): + return row - nanmean(row) + + +def zscore(row): + with np.errstate(divide="ignore", invalid="ignore"): + return (row - nanmean(row)) / nanstd(row) + + +def winsorize(row, min_percentile, max_percentile): + """ + This implementation is based on scipy.stats.mstats.winsorize + """ + a = row.copy() + nan_count = isnan(row).sum() + nonnan_count = a.size - nan_count + + # NOTE: argsort() sorts nans to the end of the array. + idx = a.argsort() + + # Set values at indices below the min percentile to the value of the entry + # at the cutoff. + if min_percentile > 0: + lower_cutoff = int(min_percentile * nonnan_count) + a[idx[:lower_cutoff]] = a[idx[lower_cutoff]] + + # Set values at indices above the max percentile to the value of the entry + # at the cutoff. + if max_percentile < 1: + upper_cutoff = int(ceil(nonnan_count * max_percentile)) + # if max_percentile is close to 1, then upper_cutoff might not + # remove any values. 
def coerce_numbers_to_my_dtype(f):
    """
    Decorate a binary method ``f(self, other)`` so that numeric ``other``
    values are first coerced to ``self.dtype``.

    This lets comparison and arithmetic operations between numbers and
    `Factor` instances behave the same whether the user supplies a float or
    an integer literal.  For example, in::

        my_filter = my_factor > 3

    ``my_factor`` typically has dtype float64 while ``3`` is an int, so we
    coerce to float64 before comparing.
    """

    @wraps(f)
    def wrapper(self, other):
        coerced = (
            coerce_to_dtype(self.dtype, other)
            if isinstance(other, Number)
            else other
        )
        return f(self, coerced)

    return wrapper
BINOP_DOCSTRING_TEMPLATE = """
Construct a :class:`~ziplime.pipeline.{rtype}` computing ``self {op} other``.

Parameters
----------
other : ziplime.pipeline.Factor, float
    Right-hand side of the expression.

Returns
-------
{ret}
"""

# Return-section bodies substituted into BINOP_DOCSTRING_TEMPLATE, depending
# on whether the operator is a comparison (-> Filter) or arithmetic
# (-> Factor).
BINOP_RETURN_FILTER = """\
filter : ziplime.pipeline.Filter
    Filter computing ``self {op} other`` with the outputs of ``self`` and
    ``other``.
"""

BINOP_RETURN_FACTOR = """\
factor : ziplime.pipeline.Factor
    Factor computing ``self {op} other`` with outputs of ``self`` and
    ``other``.
"""


def binary_operator(op):
    """
    Factory function for making binary operator methods on a Factor subclass.

    Returns a function, "binary_operator" suitable for implementing functions
    like __add__.
    """
    # When combining a Factor with a NumericalExpression, we use this
    # attrgetter instance to defer to the commuted implementation of the
    # NumericalExpression operator.
    commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))

    # Comparison operators produce boolean Filters; arithmetic produces
    # float64 Factors.
    is_compare = is_comparison(op)

    if is_compare:
        ret_doc = BINOP_RETURN_FILTER.format(op=op)
        rtype = "Filter"
    else:
        ret_doc = BINOP_RETURN_FACTOR.format(op=op)
        rtype = "Factor"

    docstring = BINOP_DOCSTRING_TEMPLATE.format(
        op=op,
        ret=ret_doc,
        rtype=rtype,
    )

    @with_doc(docstring)
    @with_name(method_name_for_op(op))
    @coerce_numbers_to_my_dtype
    def binary_operator(self, other):
        # Local import: NumExprFactor depends on the factors package, which
        # imports this module at load time.
        from ziplime.pipeline.terms.factors.num_expr_factor import NumExprFactor

        # This can't be hoisted up a scope because the types returned by
        # binop_return_type aren't defined when the top-level function is
        # invoked in the class body of Factor.
        return_type = NumExprFilter if is_compare else NumExprFactor

        if isinstance(self, NumExprFactor):
            # Merge our existing expression tree with ``other`` and emit a
            # single combined expression over the merged inputs.
            self_expr, other_expr, new_inputs = self.build_binary_op(
                op,
                other,
            )
            return return_type(
                "({left}) {op} ({right})".format(
                    left=self_expr,
                    op=op,
                    right=other_expr,
                ),
                new_inputs,
                dtype=binop_return_dtype(op, self.dtype, other.dtype),
            )
        elif isinstance(other, NumExprFactor):
            # NumericalExpression overrides ops to correctly handle merging of
            # inputs. Look up and call the appropriate reflected operator with
            # ourself as the input.
            return commuted_method_getter(other)(self)
        elif isinstance(other, Term):
            # Bind ``self`` only once when both operands are the same term so
            # the shared input isn't double-counted.
            if self is other:
                return return_type(
                    "x_0 {op} x_0".format(op=op),
                    (self,),
                    dtype=binop_return_dtype(op, self.dtype, other.dtype),
                )
            return return_type(
                "x_0 {op} x_1".format(op=op),
                (self, other),
                dtype=binop_return_dtype(op, self.dtype, other.dtype),
            )
        elif isinstance(other, Number):
            # Embed the (already-coerced) constant directly into the
            # expression string.
            return return_type(
                "x_0 {op} ({constant})".format(op=op, constant=other),
                binds=(self,),
                # .dtype access is safe here because coerce_numbers_to_my_dtype
                # will convert any input numbers to numpy equivalents.
                dtype=binop_return_dtype(op, self.dtype, other.dtype),
            )
        raise BadBinaryOperator(op, self, other)

    return binary_operator


def reflected_binary_operator(op):
    """
    Factory function for making binary operator methods on a Factor.

    Returns a function, "reflected_binary_operator" suitable for implementing
    functions like __radd__.
    """
    # Comparison operators are not expected on the reflected path.
    assert not is_comparison(op)

    @with_name(method_name_for_op(op, commute=True))
    @coerce_numbers_to_my_dtype
    def reflected_binary_operator(self, other):
        # Local import to avoid a circular import at module load time.
        from ziplime.pipeline.terms.factors.num_expr_factor import NumExprFactor

        if isinstance(self, NumericalExpression):
            # Note the swapped left/right: ``other`` is the left operand of
            # the reflected expression.
            self_expr, other_expr, new_inputs = self.build_binary_op(op, other)
            return NumExprFactor(
                "({left}) {op} ({right})".format(
                    left=other_expr,
                    right=self_expr,
                    op=op,
                ),
                new_inputs,
                dtype=binop_return_dtype(op, other.dtype, self.dtype),
            )

        # Only have to handle the numeric case because in all other valid cases
        # the corresponding left-binding method will be called.
        elif isinstance(other, Number):
            return NumExprFactor(
                "{constant} {op} x_0".format(op=op, constant=other),
                binds=(self,),
                dtype=binop_return_dtype(op, other.dtype, self.dtype),
            )
        raise BadBinaryOperator(op, other, self)

    return reflected_binary_operator
def function_application(func):
    """
    Build a method that applies the numexpr math function named ``func`` to a
    Factor's outputs.

    Returns the generated method, suitable for installation on Factor
    subclasses.
    """
    if func not in NUMEXPR_MATH_FUNCS:
        raise ValueError("Unsupported mathematical function '%s'" % func)

    docstring = dedent(
        """\
        Construct a Factor that computes ``{}()`` on each output of ``self``.

        Returns
        -------
        factor : ziplime.pipeline.Factor
        """.format(
            func
        )
    )

    @with_doc(docstring)
    @with_name(func)
    def mathfunc(self):
        # Local import to avoid a circular import at module load time.
        from ziplime.pipeline.terms.factors.num_expr_factor import NumExprFactor

        if isinstance(self, NumericalExpression):
            # Fold the function call into the existing expression tree.
            expr = "{func}({expr})".format(func=func, expr=self._expr)
            inputs = self.inputs
        else:
            # Wrap a plain term as a single-input expression.
            expr = "{func}(x_0)".format(func=func)
            inputs = (self,)
        return NumExprFactor(expr, inputs, dtype=float64_dtype)

    return mathfunc
+# if_not_float64_tell_caller_to_use_isnull = restrict_to_dtype( +# dtype=float64_dtype, +# message_template=( +# "{method_name}() was called on a factor of dtype {received_dtype}.\n" +# "{method_name}() is only defined for dtype {expected_dtype}." +# "To filter missing data, use isnull() or notnull()." +# ), +# ) + +# float64_only = restrict_to_dtype( +# dtype=float64_dtype, +# message_template=( +# "{method_name}() is only defined on Factors of dtype {expected_dtype}," +# " but it was called on a Factor of dtype {received_dtype}." +# ), +# ) \ No newline at end of file diff --git a/ziplime/pipeline/terms/factors/utils/summary_funcs.py b/ziplime/pipeline/terms/factors/utils/summary_funcs.py new file mode 100644 index 0000000000..fec9f58977 --- /dev/null +++ b/ziplime/pipeline/terms/factors/utils/summary_funcs.py @@ -0,0 +1,98 @@ +from textwrap import dedent + + + +from ziplime.pipeline.terms.filters import Filter + +from ziplime.utils.math_utils import ( + nanmax, + nanmean, + nanmedian, + nanmin, + nanstd, + nansum, +) +from ziplime.utils.numpy_utils import is_missing + + + +CORRELATION_METHOD_NOTE = dedent( + """\ + This method can only be called on expressions which are deemed safe for use + as inputs to windowed :class:`~ziplime.pipeline.Factor` objects. Examples + of such expressions include This includes + :class:`~ziplime.pipeline.data.BoundColumn` + :class:`~ziplime.pipeline.factors.Returns` and any factors created from + :meth:`~ziplime.pipeline.Factor.rank` or + :meth:`~ziplime.pipeline.Factor.zscore`. 
+ """ +) + + +class summary_funcs: + """Namespace of functions meant to be used with DailySummary.""" + + @staticmethod + def mean(a, missing_value): + return nanmean(a, axis=1) + + @staticmethod + def stddev(a, missing_value): + return nanstd(a, axis=1) + + @staticmethod + def max(a, missing_value): + return nanmax(a, axis=1) + + @staticmethod + def min(a, missing_value): + return nanmin(a, axis=1) + + @staticmethod + def median(a, missing_value): + return nanmedian(a, axis=1) + + @staticmethod + def sum(a, missing_value): + return nansum(a, axis=1) + + @staticmethod + def notnull_count(a, missing_value): + return (~is_missing(a, missing_value)).sum(axis=1) + + names = {k for k in locals() if not k.startswith("_")} + + +def summary_method(name): + + func = getattr(summary_funcs, name) + + #@float64_only + def f(self, mask: Filter | None = None): + + """Create a 1-dimensional factor computing the {} of self, each day. + + Parameters + ---------- + mask : ziplime.pipeline.Filter, optional + A Filter representing assets to consider when computing results. + If supplied, we ignore asset/date pairs where ``mask`` produces + ``False``. 
+ + Returns + ------- + result : ziplime.pipeline.Factor + """ + from ziplime.pipeline.terms.factors.daily_summary import DailySummary + + return DailySummary( + func, + self, + mask=mask, + dtype=self.dtype, + ) + + f.__name__ = func.__name__ + f.__doc__ = f.__doc__.format(f.__name__) + + return f \ No newline at end of file diff --git a/ziplime/pipeline/terms/filters/__init__.py b/ziplime/pipeline/terms/filters/__init__.py new file mode 100644 index 0000000000..4830026c9b --- /dev/null +++ b/ziplime/pipeline/terms/filters/__init__.py @@ -0,0 +1,36 @@ +from .array_predicate import ArrayPredicate +from .custom_filter import CustomFilter +from .all_present import AllPresent +from .filter import Filter +from .latest import Latest +from .maximum_filter import MaximumFilter +from .not_null_filter import NotNullFilter +from .null_filter import NullFilter +from .num_expr_filter import NumExprFilter +from .percentile_filter import PercentileFilter +from .single_asset import SingleAsset +from .all import All +from .any import Any +from .at_least_n import AtLeastN +from .static_assets import StaticAssets +from .static_sids import StaticSids + +__all__ = [ + "All", + "AllPresent", + "Any", + "ArrayPredicate", + "AtLeastN", + "CustomFilter", + "Filter", + "Latest", + "MaximumFilter", + "NotNullFilter", + "NullFilter", + "NumExprFilter", + "PercentileFilter", + "SingleAsset", + "StaticAssets", + "StaticSids", +] + diff --git a/ziplime/pipeline/terms/filters/all.py b/ziplime/pipeline/terms/filters/all.py new file mode 100644 index 0000000000..d12e25861a --- /dev/null +++ b/ziplime/pipeline/terms/filters/all.py @@ -0,0 +1,15 @@ +from ziplime.pipeline.terms.filters.custom_filter import CustomFilter + + +class All(CustomFilter): + """ + A Filter requiring that assets produce True for ``window_length`` + consecutive days. 
class AllPresent(CustomFilter, SingleInputMixin, StandardOutputs):
    """Pipeline filter indicating input term has data for a given window."""

    def _validate(self):
        # A Filter is never "missing", so AllPresent over a Filter is almost
        # certainly a user error; reject it up front.
        if isinstance(self.inputs[0], Filter):
            raise TypeError("Input to filter `AllPresent` cannot be a Filter.")
        return super(AllPresent, self)._validate()

    def compute(self, today, assets, out, value):
        # Build the missingness mask for the window, then require that no
        # entry in any column of the window is missing.
        if isinstance(value, LabelArray):
            missing = value.is_missing()
        else:
            missing = is_missing(value, self.inputs[0].missing_value)
        out[:] = ~np_any(missing, axis=0)
class ArrayPredicate(SingleInputMixin, Filter):
    """
    A filter applying a function from (ndarray, *args) -> ndarray[bool].

    Parameters
    ----------
    term : ziplime.pipeline.Term
        Term producing the array over which the predicate will be computed.
    op : function(ndarray, *args) -> ndarray[bool]
        Function to apply to the result of `term`.
    opargs : tuple[hashable]
        Additional arguments to apply to ``op``.
    """

    # ``op`` and ``opargs`` participate in the term's identity/hash key.
    params = ("op", "opargs")
    window_length = 0

    def __new__(cls, term: Term, op, opargs: tuple):
        hash(opargs)  # fail fast if opargs isn't hashable.
        return super(ArrayPredicate, cls).__new__(
            ArrayPredicate,
            op=op,
            opargs=opargs,
            inputs=(term,),
            mask=term.mask,
        )

    def _compute(self, arrays, dates, assets, mask):
        # Apply the predicate to the input array, then AND with ``mask`` so
        # masked-out positions always produce False.
        params = self.params
        data = arrays[0]
        return params["op"](data, *params["opargs"]) & mask

    def graph_repr(self):
        # Graphviz interprets `\l` as "divide label into lines, left-justified"
        return "{}:\\l op: {}.{}()".format(
            type(self).__name__,
            self.params["op"].__module__,
            self.params["op"].__name__,
        )


class AtLeastN(CustomFilter):
    """
    A Filter requiring that assets produce True for at least N days in the
    last ``window_length`` days.

    **Default Inputs:** None

    **Default Window Length:** None
    """

    # ``N`` is supplied at construction time and participates in identity.
    params = ("N",)

    def compute(self, today, assets, out, arg, N):
        # Column-wise count of True rows in the window, compared against N.
        out[:] = arg.sum(axis=0) >= N
If this + argument is passed to the CustomFilter constructor, we look for a + class-level attribute named ``inputs``. + window_length : int, optional + Number of rows to pass for each input. If this argument is not passed + to the CustomFilter constructor, we look for a class-level attribute + named `window_length`. + + Notes + ----- + Users implementing their own Filters should subclass CustomFilter and + implement a method named ``compute`` with the following signature: + + .. code-block:: python + + def compute(self, today, assets, out, *inputs): + ... + + On each simulation date, ``compute`` will be called with the current date, + an array of sids, an output array, and an input array for each expression + passed as inputs to the CustomFilter constructor. + + The specific types of the values passed to ``compute`` are as follows:: + + today : np.datetime64[ns] + Row label for the last row of all arrays passed as `inputs`. + assets : np.array[int64, ndim=1] + Column labels for `out` and`inputs`. + out : np.array[bool, ndim=1] + Output array of the same shape as `assets`. `compute` should write + its desired return values into `out`. + *inputs : tuple of np.array + Raw data arrays corresponding to the values of `self.inputs`. + + See the documentation for + :class:`~ziplime.pipeline.CustomFactor` for more details on + implementing a custom ``compute`` method. 
    def _validate(self):
        """
        Validate this term, translating a generic UnsupportedDataType error
        into one that hints at the appropriate Custom* base class.
        """
        try:
            super(CustomFilter, self)._validate()
        except UnsupportedDataType as exc:
            # A filter must be boolean.  If the declared dtype looks like a
            # classifier or factor dtype, the user most likely subclassed the
            # wrong Custom* base, so re-raise with a pointer to the right one.
            if self.dtype in CLASSIFIER_DTYPES:
                raise UnsupportedDataType(
                    typename=type(self).__name__,
                    dtype=self.dtype,
                    hint="Did you mean to create a CustomClassifier?",
                ) from exc
            elif self.dtype in FACTOR_DTYPES:
                raise UnsupportedDataType(
                    typename=type(self).__name__,
                    dtype=self.dtype,
                    hint="Did you mean to create a CustomFactor?",
                ) from exc
            # Unrecognized dtype: propagate the original error unchanged.
            raise
+ commuted_method_getter = attrgetter(method_name_for_op(op, commute=True)) + + def binary_operator(self, other): + from ziplime.pipeline.terms.filters.num_expr_filter import NumExprFilter + if isinstance(self, NumericalExpression): + self_expr, other_expr, new_inputs = self.build_binary_op( + op, + other, + ) + return NumExprFilter.create( + "({left}) {op} ({right})".format( + left=self_expr, + op=op, + right=other_expr, + ), + new_inputs, + ) + elif isinstance(other, NumericalExpression): + # NumericalExpression overrides numerical ops to correctly handle + # merging of inputs. Look up and call the appropriate + # right-binding operator with ourself as the input. + return commuted_method_getter(other)(self) + elif isinstance(other, Term): + if other.dtype != bool_dtype: + raise BadBinaryOperator(op, self, other) + if self is other: + return NumExprFilter.create( + "x_0 {op} x_0".format(op=op), + (self,), + ) + return NumExprFilter.create( + "x_0 {op} x_1".format(op=op), + (self, other), + ) + elif isinstance(other, int): # Note that this is true for bool as well + return NumExprFilter.create( + "x_0 {op} {constant}".format(op=op, constant=int(other)), + binds=(self,), + ) + raise BadBinaryOperator(op, self, other) + + binary_operator.__doc__ = "Binary Operator: '%s'" % op + return binary_operator + + +def unary_operator(op): + """ + Factory function for making unary operator methods for Filters. + """ + valid_ops = {"~"} + if op not in valid_ops: + raise ValueError("Invalid unary operator %s." % op) + + def unary_operator(self): + from ziplime.pipeline.terms.filters.num_expr_filter import NumExprFilter + # This can't be hoisted up a scope because the types returned by + # unary_op_return_type aren't defined when the top-level function is + # invoked. 
class Filter(RestrictedDTypeMixin, ComputableTerm):
    """
    Pipeline expression computing a boolean output.

    Filters are most commonly useful for describing sets of assets to include
    or exclude for some particular purpose. Many Pipeline API functions accept
    a ``mask`` argument, which can be supplied a Filter indicating that only
    values passing the Filter should be considered when performing the
    requested computation. For example, :meth:`ziplime.pipeline.Factor.top`
    accepts a mask indicating that ranks should be computed only on assets
    that passed the specified Filter.

    The most common way to construct a Filter is via one of the comparison
    operators (``<``, ``<=``, ``!=``, ``eq``, ``>``, ``>=``) of
    :class:`~ziplime.pipeline.Factor`. For example, a natural way to construct
    a Filter for stocks with a 10-day VWAP less than $20.0 is to first
    construct a Factor computing 10-day VWAP and compare it to the scalar
    value 20.0::

        >>> from ziplime.pipeline.factors import VWAP
        >>> vwap_10 = VWAP(window_length=10)
        >>> vwaps_under_20 = (vwap_10 <= 20)

    Filters can also be constructed via comparisons between two Factors. For
    example, to construct a Filter producing True for asset/date pairs where
    the asset's 10-day VWAP was greater than its 30-day VWAP::

        >>> short_vwap = VWAP(window_length=10)
        >>> long_vwap = VWAP(window_length=30)
        >>> higher_short_vwap = (short_vwap > long_vwap)

    Filters can be combined via the ``&`` (and) and ``|`` (or) operators.

    ``&``-ing together two filters produces a new Filter that produces True if
    **both** of the inputs produced True.

    ``|``-ing together two filters produces a new Filter that produces True if
    **either** of its inputs produced True.

    The ``~`` operator can be used to invert a Filter, swapping all True
    values with Falses and vice-versa.

    Filters may be set as the ``screen`` attribute of a Pipeline, indicating
    asset/date pairs for which the filter produces False should be excluded
    from the Pipeline's output. This is useful both for reducing noise in the
    output of a Pipeline and for reducing memory consumption of Pipeline
    results.
    """

    # Filters are window-safe by default, since a yes/no decision means the
    # same thing from all temporal perspectives.
    window_safe = True

    # Used by RestrictedDTypeMixin
    ALLOWED_DTYPES = FILTER_DTYPES
    dtype = bool_dtype

    # ``locals()`` inside a class body is the class namespace under
    # construction.  Installing both the normal and the commuted method name
    # for every boolean binop makes e.g. ``__and__`` and ``__rand__``
    # available on all Filter subclasses without writing them out by hand.
    clsdict = locals()
    clsdict.update(
        {method_name_for_op(op): binary_operator(op) for op in FILTER_BINOPS}
    )
    clsdict.update(
        {
            method_name_for_op(op, commute=True): binary_operator(op)
            for op in FILTER_BINOPS
        }
    )

    __invert__ = unary_operator("~")

    def _validate(self):
        # Run superclass validation first so that we handle `dtype not passed`
        # before this.
        retval = super(Filter, self)._validate()
        if self.dtype != bool_dtype:
            raise UnsupportedDataType(typename=type(self).__name__, dtype=self.dtype)
        return retval

    @classmethod
    def _principal_computable_term_type(cls):
        # All Filter subclasses share Filter as their principal type; used by
        # if_else() below to check that both branches are the same kind of
        # term.
        return Filter

    def if_else(self, if_true: ComputableTerm, if_false: ComputableTerm):
        """
        Create a term that selects values from one of two choices.

        Parameters
        ----------
        if_true : ziplime.pipeline.term.ComputableTerm
            Expression whose values should be used at locations where this
            filter outputs True.
        if_false : ziplime.pipeline.term.ComputableTerm
            Expression whose values should be used at locations where this
            filter outputs False.

        Returns
        -------
        merged : ziplime.pipeline.term.ComputableTerm
            A term that computes by taking values from either ``if_true`` or
            ``if_false``, depending on the values produced by ``self``.

            The returned term draws from ``if_true`` at locations where
            ``self`` produces True, and it draws from ``if_false`` at
            locations where ``self`` produces False.

        Example
        -------

        Let ``f`` be a Factor that produces the following output::

                         AAPL   MSFT    MCD     BK
            2017-03-13    1.0    2.0    3.0    4.0
            2017-03-14    5.0    6.0    7.0    8.0

        Let ``g`` be another Factor that produces the following output::

                         AAPL   MSFT    MCD     BK
            2017-03-13   10.0   20.0   30.0   40.0
            2017-03-14   50.0   60.0   70.0   80.0

        Finally, let ``condition`` be a Filter that produces the following
        output::

                         AAPL   MSFT    MCD     BK
            2017-03-13   True  False   True  False
            2017-03-14   True   True  False  False

        Then, the expression ``condition.if_else(f, g)`` produces the
        following output::

                         AAPL   MSFT    MCD     BK
            2017-03-13    1.0   20.0    3.0   40.0
            2017-03-14    5.0    6.0   70.0   80.0

        See Also
        --------
        numpy.where
        Factor.fillna
        """
        # The two branches must agree on term kind, dtype, outputs, and
        # missing value so the merged term has a coherent contract.
        true_type = if_true._principal_computable_term_type()
        false_type = if_false._principal_computable_term_type()

        if true_type is not false_type:
            raise TypeError(
                "Mismatched types in if_else(): if_true={}, but if_false={}".format(
                    true_type.__name__, false_type.__name__
                )
            )

        if if_true.dtype != if_false.dtype:
            raise TypeError(
                "Mismatched dtypes in if_else(): "
                "if_true.dtype = {}, if_false.dtype = {}".format(
                    if_true.dtype, if_false.dtype
                )
            )

        if if_true.outputs != if_false.outputs:
            raise ValueError(
                "Mismatched outputs in if_else(): "
                "if_true.outputs = {}, if_false.outputs = {}".format(
                    if_true.outputs, if_false.outputs
                ),
            )

        if not same(if_true.missing_value, if_false.missing_value):
            raise ValueError(
                "Mismatched missing values in if_else(): "
                "if_true.missing_value = {!r}, if_false.missing_value = {!r}".format(
                    if_true.missing_value, if_false.missing_value
                )
            )

        # Mix IfElseMixin into the branch type so the result computes the
        # conditional selection.
        return_type = type(if_true)._with_mixin(IfElseMixin)

        return return_type(
            condition=self,
            if_true=if_true,
            if_false=if_false,
        )
+ data = arrays[0] + group_labels, null_label = self.inputs[1]._to_integral(arrays[1]) + effective_mask = ( + mask + & (group_labels != null_label) + & ~is_missing(data, self.inputs[0].missing_value) + ).view(uint8) + + return grouped_masked_is_maximal( + # Unconditionally view the data as int64. + # This is safe because casting from float64 to int64 is an + # order-preserving operation. + data.view(int64_dtype), + # PERF: Consider supporting different sizes of group labels. + group_labels.astype(int64_dtype), + effective_mask, + ) + + def __repr__(self): + return "Maximum({}, groupby={}, mask={})".format( + self.inputs[0].recursive_repr(), + self.inputs[1].recursive_repr(), + self.mask.recursive_repr(), + ) + + def graph_repr(self): + # Graphviz interprets `\l` as "divide label into lines, left-justified" + return "Maximum:\\l groupby: {}\\l mask: {}\\l".format( + self.inputs[1].recursive_repr(), + self.mask.recursive_repr(), + ) diff --git a/ziplime/pipeline/terms/filters/not_null_filter.py b/ziplime/pipeline/terms/filters/not_null_filter.py new file mode 100644 index 0000000000..b17a41f62f --- /dev/null +++ b/ziplime/pipeline/terms/filters/not_null_filter.py @@ -0,0 +1,29 @@ +from ziplime.lib.labelarray import LabelArray +from ziplime.pipeline.mixins import SingleInputMixin +from ziplime.pipeline.terms.filters import Filter +from ziplime.utils.numpy_utils import is_missing + + +class NotNullFilter(SingleInputMixin, Filter): + """ + A Filter indicating whether input values are **not** missing from an input. + + Parameters + ---------- + factor : ziplime.pipeline.Term + The factor to compare against its missing_value. 
+ """ + + window_length = 0 + + def __new__(cls, term): + return super(NotNullFilter, cls).__new__( + cls, + inputs=(term,), + ) + + def _compute(self, arrays, dates, assets, mask): + data = arrays[0] + if isinstance(data, LabelArray): + return ~data.is_missing() + return ~is_missing(arrays[0], self.inputs[0].missing_value) diff --git a/ziplime/pipeline/terms/filters/null_filter.py b/ziplime/pipeline/terms/filters/null_filter.py new file mode 100644 index 0000000000..5eee58e4cd --- /dev/null +++ b/ziplime/pipeline/terms/filters/null_filter.py @@ -0,0 +1,29 @@ +from ziplime.lib.labelarray import LabelArray +from ziplime.pipeline.mixins import SingleInputMixin +from ziplime.pipeline.terms.filters import Filter +from ziplime.utils.numpy_utils import is_missing + + +class NullFilter(SingleInputMixin, Filter): + """ + A Filter indicating whether input values are missing from an input. + + Parameters + ---------- + factor : ziplime.pipeline.Term + The factor to compare against its missing_value. + """ + + window_length = 0 + + def __new__(cls, term): + return super(NullFilter, cls).__new__( + cls, + inputs=(term,), + ) + + def _compute(self, arrays, dates, assets, mask): + data = arrays[0] + if isinstance(data, LabelArray): + return data.is_missing() + return is_missing(arrays[0], self.inputs[0].missing_value) diff --git a/ziplime/pipeline/terms/filters/num_expr_filter.py b/ziplime/pipeline/terms/filters/num_expr_filter.py new file mode 100644 index 0000000000..75130f7489 --- /dev/null +++ b/ziplime/pipeline/terms/filters/num_expr_filter.py @@ -0,0 +1,34 @@ +from ziplime.pipeline.expression import NumericalExpression +from ziplime.pipeline.terms.filters.filter import Filter +from ziplime.utils.numpy_utils import bool_dtype + + +class NumExprFilter(NumericalExpression, Filter): + """ + A Filter computed from a numexpr expression. + """ + + @classmethod + def create(cls, expr, binds): + """ + Helper for creating new NumExprFactors. 
+
+        This is just a wrapper around NumericalExpression.__new__ that always
+        forwards `bool` as the dtype, since Filters can only be of boolean
+        dtype.
+        """
+        return cls(expr=expr, binds=binds, dtype=bool_dtype)
+
+    def _compute(self, arrays, dates, assets, mask):
+        """
+        Compute our result with numexpr, then re-apply `mask`.
+        """
+        return (
+            super(NumExprFilter, self)._compute(
+                arrays,
+                dates,
+                assets,
+                mask,
+            )
+            & mask
+        )
\ No newline at end of file
diff --git a/ziplime/pipeline/terms/filters/percentile_filter.py b/ziplime/pipeline/terms/filters/percentile_filter.py
new file mode 100644
index 0000000000..b69905bd4e
--- /dev/null
+++ b/ziplime/pipeline/terms/filters/percentile_filter.py
@@ -0,0 +1,100 @@
+from numpy import (
+    float64,
+    nan,
+    nanpercentile,
+)
+
+from ziplime.errors import (
+    BadPercentileBounds,
+)
+from ziplime.pipeline.mixins import (
+    SingleInputMixin,
+)
+from ziplime.pipeline.terms.filters import Filter
+
+
+class PercentileFilter(SingleInputMixin, Filter):
+    """
+    A Filter representing assets falling between percentile bounds of a Factor.
+
+    Parameters
+    ----------
+    factor : ziplime.pipeline.factor.Factor
+        The factor over which to compute percentile bounds.
+    min_percentile : float [0.0, 100.0]
+        The minimum percentile rank of an asset that will pass the filter.
+    max_percentile : float [0.0, 100.0]
+        The maximum percentile rank of an asset that will pass the filter.
+ """ + + window_length = 0 + + def __new__(cls, factor, min_percentile, max_percentile, mask): + return super(PercentileFilter, cls).__new__( + cls, + inputs=(factor,), + mask=mask, + min_percentile=min_percentile, + max_percentile=max_percentile, + ) + + def _init(self, min_percentile, max_percentile, *args, **kwargs): + self._min_percentile = min_percentile + self._max_percentile = max_percentile + return super(PercentileFilter, self)._init(*args, **kwargs) + + @classmethod + def _static_identity(cls, min_percentile, max_percentile, *args, **kwargs): + return ( + super(PercentileFilter, cls)._static_identity(*args, **kwargs), + min_percentile, + max_percentile, + ) + + def _validate(self): + """ + Ensure that our percentile bounds are well-formed. + """ + if not 0.0 <= self._min_percentile < self._max_percentile <= 100.0: + raise BadPercentileBounds( + min_percentile=self._min_percentile, + max_percentile=self._max_percentile, + upper_bound=100.0, + ) + return super(PercentileFilter, self)._validate() + + def _compute(self, arrays, dates, assets, mask): + """ + For each row in the input, compute a mask of all values falling between + the given percentiles. + """ + # TODO: Review whether there's a better way of handling small numbers + # of columns. + data = arrays[0].copy().astype(float64) + data[~mask] = nan + + # FIXME: np.nanpercentile **should** support computing multiple bounds + # at once, but there's a bug in the logic for multiple bounds in numpy + # 1.9.2. It will be fixed in 1.10. + # c.f. 
https://github.com/numpy/numpy/pull/5981 + lower_bounds = nanpercentile( + data, + self._min_percentile, + axis=1, + keepdims=True, + ) + upper_bounds = nanpercentile( + data, + self._max_percentile, + axis=1, + keepdims=True, + ) + return (lower_bounds <= data) & (data <= upper_bounds) + + def graph_repr(self): + # Graphviz interprets `\l` as "divide label into lines, left-justified" + return "{}:\\l min: {}, max: {}\\l".format( + type(self).__name__, + self._min_percentile, + self._max_percentile, + ) \ No newline at end of file diff --git a/ziplime/pipeline/terms/filters/single_asset.py b/ziplime/pipeline/terms/filters/single_asset.py new file mode 100644 index 0000000000..a2bd096e33 --- /dev/null +++ b/ziplime/pipeline/terms/filters/single_asset.py @@ -0,0 +1,43 @@ +from ziplime.errors import NonExistentAssetInTimeFrame +from ziplime.pipeline.terms.filters import Filter +from ziplime.utils.numpy_utils import repeat_first_axis + + +class SingleAsset(Filter): + """ + A Filter that computes to True only for the given asset. + """ + + inputs = [] + window_length = 1 + + def __new__(cls, asset): + return super(SingleAsset, cls).__new__(cls, asset=asset) + + def _init(self, asset, *args, **kwargs): + self._asset = asset + return super(SingleAsset, self)._init(*args, **kwargs) + + @classmethod + def _static_identity(cls, asset, *args, **kwargs): + return ( + super(SingleAsset, cls)._static_identity(*args, **kwargs), + asset, + ) + + def _compute(self, arrays, dates, assets, mask): + is_my_asset = assets == self._asset.sid + out = repeat_first_axis(is_my_asset, len(mask)) + # Raise an exception if `self._asset` does not exist for the entirety + # of the timeframe over which we are computing. 
+ if (is_my_asset.sum() != 1) or ((out & mask).sum() != len(mask)): + raise NonExistentAssetInTimeFrame( + asset=self._asset, + start_date=dates[0], + end_date=dates[-1], + ) + return out + + def graph_repr(self): + # Graphviz interprets `\l` as "divide label into lines, left-justified" + return "SingleAsset:\\l asset: {!r}\\l".format(self._asset) diff --git a/ziplime/pipeline/terms/filters/static_assets.py b/ziplime/pipeline/terms/filters/static_assets.py new file mode 100644 index 0000000000..42b6d209b2 --- /dev/null +++ b/ziplime/pipeline/terms/filters/static_assets.py @@ -0,0 +1,20 @@ +from ziplime.pipeline.terms.filters.static_sids import StaticSids + + +class StaticAssets(StaticSids): + """ + A Filter that computes True for a specific set of predetermined assets. + + ``StaticAssets`` is mostly useful for debugging or for interactively + computing pipeline terms for a fixed set of assets that are known ahead of + time. + + Parameters + ---------- + assets : iterable[Asset] + An iterable of assets for which to filter. + """ + + def __new__(cls, assets): + sids = frozenset(asset.sid for asset in assets) + return super(StaticAssets, cls).__new__(cls, sids) diff --git a/ziplime/pipeline/terms/filters/static_sids.py b/ziplime/pipeline/terms/filters/static_sids.py new file mode 100644 index 0000000000..fdd82e8ef6 --- /dev/null +++ b/ziplime/pipeline/terms/filters/static_sids.py @@ -0,0 +1,29 @@ +from ziplime.pipeline.terms.filters import Filter +from ziplime.utils.numpy_utils import repeat_first_axis + + +class StaticSids(Filter): + """ + A Filter that computes True for a specific set of predetermined sids. + + ``StaticSids`` is mostly useful for debugging or for interactively + computing pipeline terms for a fixed set of sids that are known ahead of + time. + + Parameters + ---------- + sids : iterable[int] + An iterable of sids for which to filter. 
+ """ + + inputs = () + window_length = 0 + params = ("sids",) + + def __new__(cls, sids): + sids = frozenset(sids) + return super(StaticSids, cls).__new__(cls, sids=sids) + + def _compute(self, arrays, dates, sids, mask): + my_columns = sids.isin(self.params["sids"]) + return repeat_first_axis(my_columns, len(mask)) & mask diff --git a/ziplime/pipeline/terms/input_dates.py b/ziplime/pipeline/terms/input_dates.py new file mode 100644 index 0000000000..6e56bb0003 --- /dev/null +++ b/ziplime/pipeline/terms/input_dates.py @@ -0,0 +1,31 @@ +from ziplime.pipeline import Term +from ziplime.utils.numpy_utils import datetime64ns_dtype + + +class InputDates(Term): + """ + 1-Dimensional term providing date labels for other term inputs. + + This term is guaranteed to be available as an input for any term computed + by SimplePipelineEngine.run_pipeline(). + """ + + ndim = 1 + dataset = None + dtype = datetime64ns_dtype + inputs = () + dependencies = {} + mask = None + windowed = False + window_safe = True + + def __repr__(self): + return "InputDates()" + + graph_repr = __repr__ + + def _compute(self, today, assets, out): + raise NotImplementedError( + "InputDates cannot be computed directly." + " Check your PipelineEngine configuration." + ) diff --git a/ziplime/pipeline/terms/loadable_term.py b/ziplime/pipeline/terms/loadable_term.py new file mode 100644 index 0000000000..f64e009d9b --- /dev/null +++ b/ziplime/pipeline/terms/loadable_term.py @@ -0,0 +1,16 @@ +from ziplime.pipeline.terms.term import Term + + +class LoadableTerm(Term): + """ + A Term that should be loaded from an external resource by a PipelineLoader. + + This is the base class for :class:`ziplime.pipeline.data.BoundColumn`. 
+ """ + + windowed = False + inputs = () + + @property + def dependencies(self): + return {self.mask: 0} \ No newline at end of file diff --git a/ziplime/pipeline/terms/term.py b/ziplime/pipeline/terms/term.py new file mode 100644 index 0000000000..0e595f34d4 --- /dev/null +++ b/ziplime/pipeline/terms/term.py @@ -0,0 +1,368 @@ +""" +Base class for Filters, Factors and Classifiers +""" + +from abc import ABC, abstractmethod +from collections.abc import Mapping +from weakref import WeakValueDictionary + + +from ziplime.errors import NonSliceableTerm + +from ..domain import GENERIC +from ziplime.assets.entities.asset import Asset +from .utils import validate_dtype + + +class Term(ABC): + """ + Base class for objects that can appear in the compute graph of a + :class:`ziplime.pipeline.Pipeline`. + + Notes + ----- + Most Pipeline API users only interact with :class:`Term` via subclasses: + + - :class:`~ziplime.pipeline.data.BoundColumn` + - :class:`~ziplime.pipeline.Factor` + - :class:`~ziplime.pipeline.Filter` + - :class:`~ziplime.pipeline.Classifier` + + Instances of :class:`Term` are **memoized**. If you call a Term's + constructor with the same arguments twice, the same object will be returned + from both calls: + + **Example:** + + >>> from ziplime.pipeline.data import EquityPricing + >>> from ziplime.pipeline.factors import SimpleMovingAverage + >>> x = SimpleMovingAverage(inputs=[EquityPricing.close], window_length=5) + >>> y = SimpleMovingAverage(inputs=[EquityPricing.close], window_length=5) + >>> x is y + True + + .. warning:: + + Memoization of terms means that it's generally unsafe to modify + attributes of a term after construction. + """ + + # These are NotSpecified because a subclass is required to provide them. + dtype = None + missing_value = None + + # Subclasses aren't required to provide `params`. The default behavior is + # no params. + params = () + + # All terms are generic by default. 
+ domain = GENERIC + + # Determines if a term is safe to be used as a windowed input. + window_safe = False + + # The dimensions of the term's output (1D or 2D). + ndim = 2 + + _term_cache = WeakValueDictionary() + + def __new__( + cls, + domain=None, + dtype=None, + missing_value=None, + window_safe=None, + ndim=None, + # params is explicitly not allowed to be passed to an instance. + *args, + **kwargs, + ): + """ + Memoized constructor for Terms. + + Caching previously-constructed Terms is useful because it allows us to + only compute equivalent sub-expressions once when traversing a Pipeline + dependency graph. + + Caching previously-constructed Terms is **sane** because terms and + their inputs are both conceptually immutable. + """ + # Subclasses can override these class-level attributes to provide + # different default values for instances. + if domain is None: + domain = cls.domain + if dtype is None: + dtype = cls.dtype + if missing_value is None: + missing_value = cls.missing_value + if ndim is None: + ndim = cls.ndim + if window_safe is None: + window_safe = cls.window_safe + + dtype, missing_value = validate_dtype( + cls.__name__, + dtype, + missing_value, + ) + params = cls._pop_params(kwargs) + + identity = cls._static_identity( + domain=domain, + dtype=dtype, + missing_value=missing_value, + window_safe=window_safe, + ndim=ndim, + params=params, + *args, + **kwargs, + ) + + try: + return cls._term_cache[identity] + except KeyError: + new_instance = cls._term_cache[identity] = ( + super(Term, cls) + .__new__(cls) + ._init( + domain=domain, + dtype=dtype, + missing_value=missing_value, + window_safe=window_safe, + ndim=ndim, + params=params, + *args, + **kwargs, + ) + ) + return new_instance + + @classmethod + def _pop_params(cls, kwargs): + """ + Pop entries from the `kwargs` passed to cls.__new__ based on the values + in `cls.params`. + + Parameters + ---------- + kwargs : dict + The kwargs passed to cls.__new__. 
+ + Returns + ------- + params : list[(str, object)] + A list of string, value pairs containing the entries in cls.params. + + Raises + ------ + TypeError + Raised if any parameter values are not passed or not hashable. + """ + params = cls.params + if not isinstance(params, Mapping): + params = {k: None for k in params} + param_values = [] + for key, default_value in params.items(): + try: + value = kwargs.pop(key, default_value) + if value is None: + raise KeyError(key) + + # Check here that the value is hashable so that we fail here + # instead of trying to hash the param values tuple later. + hash(value) + except KeyError as exc: + raise TypeError( + "{typename} expected a keyword parameter {name!r}.".format( + typename=cls.__name__, name=key + ) + ) from exc + except TypeError as exc: + # Value wasn't hashable. + raise TypeError( + "{typename} expected a hashable value for parameter " + "{name!r}, but got {value!r} instead.".format( + typename=cls.__name__, + name=key, + value=value, + ) + ) from exc + + param_values.append((key, value)) + return tuple(param_values) + + def __init__(self, *args, **kwargs): + """ + Noop constructor to play nicely with our caching __new__. Subclasses + should implement _init instead of this method. + + When a class' __new__ returns an instance of that class, Python will + automatically call __init__ on the object, even if a new object wasn't + actually constructed. Because we memoize instances, we often return an + object that was already initialized from __new__, in which case we + don't want to call __init__ again. + + Subclasses that need to initialize new instances should override _init, + which is guaranteed to be called only once. 
+ """ + pass + + def __getitem__(self, key: Asset): + from ziplime.pipeline.terms.loadable_term import LoadableTerm + if isinstance(self, LoadableTerm): + raise NonSliceableTerm(term=self) + + from ..mixins import SliceMixin + + slice_type = type(self)._with_mixin(SliceMixin) + return slice_type(self, key) + + @classmethod + def _static_identity(cls, domain, dtype, missing_value, window_safe, ndim, params): + """ + Return the identity of the Term that would be constructed from the + given arguments. + + Identities that compare equal will cause us to return a cached instance + rather than constructing a new one. We do this primarily because it + makes dependency resolution easier. + + This is a classmethod so that it can be called from Term.__new__ to + determine whether to produce a new instance. + """ + return (cls, domain, dtype, missing_value, window_safe, ndim, params) + + def _init(self, domain, dtype, missing_value, window_safe, ndim, params): + """ + Parameters + ---------- + domain : ziplime.pipeline.domain.Domain + The domain of this term. + dtype : np.dtype + Dtype of this term's output. + missing_value : object + Missing value for this term. + ndim : 1 or 2 + The dimensionality of this term. + params : tuple[(str, hashable)] + Tuple of key/value pairs of additional parameters. + """ + self.domain = domain + self.dtype = dtype + self.missing_value = missing_value + self.window_safe = window_safe + self.ndim = ndim + + for name, _ in params: + if hasattr(self, name): + raise TypeError( + "Parameter {name!r} conflicts with already-present" + " attribute with value {value!r}.".format( + name=name, + value=getattr(self, name), + ) + ) + # TODO: Consider setting these values as attributes and replacing + # the boilerplate in NumericalExpression, Rank, and + # PercentileFilter. + + self.params = dict(params) + + # Make sure that subclasses call super() in their _validate() methods + # by setting this flag. 
The base class implementation of _validate + # should set this flag to True. + self._subclass_called_super_validate = False + self._validate() + assert self._subclass_called_super_validate, ( + "Term._validate() was not called.\n" + "This probably means that you overrode _validate" + " without calling super()." + ) + del self._subclass_called_super_validate + + return self + + def _validate(self): + """ + Assert that this term is well-formed. This should be called exactly + once, at the end of Term._init(). + """ + # mark that we got here to enforce that subclasses overriding _validate + # call super(). + self._subclass_called_super_validate = True + + def compute_extra_rows(self, all_dates, start_date, end_date, min_extra_rows): + """ + Calculate the number of extra rows needed to compute ``self``. + + Must return at least ``min_extra_rows``, and the default implementation + is to just return ``min_extra_rows``. This is overridden by + downsampled terms to ensure that the first date computed is a + recomputation date. + + Parameters + ---------- + all_dates : pd.DatetimeIndex + The trading sessions against which ``self`` will be computed. + start_date : pd.Timestamp + The first date for which final output is requested. + end_date : pd.Timestamp + The last date for which final output is requested. + min_extra_rows : int + The minimum number of extra rows required of ``self``, as + determined by other terms that depend on ``self``. + + Returns + ------- + extra_rows : int + The number of extra rows to compute. Must be at least + ``min_extra_rows``. + """ + return min_extra_rows + + @property + @abstractmethod + def inputs(self): + """ + A tuple of other Terms needed as inputs for ``self``. + """ + raise NotImplementedError("inputs") + + @property + @abstractmethod + def windowed(self): + """ + Boolean indicating whether this term is a trailing-window computation. 
+ """ + raise NotImplementedError("windowed") + + @property + @abstractmethod + def mask(self): + """ + A :class:`~ziplime.pipeline.Filter` representing asset/date pairs to + while computing this Term. True means include; False means exclude. + """ + raise NotImplementedError("mask") + + @property + @abstractmethod + def dependencies(self): + """ + A dictionary mapping terms that must be computed before `self` to the + number of extra rows needed for those terms. + """ + raise NotImplementedError("dependencies") + + def graph_repr(self): + """A short repr to use when rendering GraphViz graphs.""" + # Default graph_repr is just the name of the type. + return type(self).__name__ + + def recursive_repr(self): + """A short repr to use when recursively rendering terms with inputs.""" + # Default recursive_repr is just the name of the type. + return type(self).__name__ + + + diff --git a/ziplime/pipeline/terms/utils.py b/ziplime/pipeline/terms/utils.py new file mode 100644 index 0000000000..448745d64e --- /dev/null +++ b/ziplime/pipeline/terms/utils.py @@ -0,0 +1,99 @@ +from ziplime.errors import UnsupportedDType, NotDType, DTypeNotSpecified +from ziplime.lib.adjusted_array import can_represent_dtype +from ziplime.lib.labelarray import LabelArray +from ziplime.utils.numpy_utils import categorical_dtype, default_missing_value_for_dtype + +from numpy import ( + array, + dtype as dtype_class, +) + + +def validate_dtype(termname, dtype, missing_value): + """ + Validate a `dtype` and `missing_value` passed to Term.__new__. + + Ensures that we know how to represent ``dtype``, and that missing_value + is specified for types without default missing values. + + Returns + ------- + validated_dtype, validated_missing_value : np.dtype, any + The dtype and missing_value to use for the new term. + + Raises + ------ + DTypeNotSpecified + When no dtype was passed to the instance, and the class doesn't + provide a default. 
+ NotDType + When either the class or the instance provides a value not + coercible to a numpy dtype. + NoDefaultMissingValue + When dtype requires an explicit missing_value, but + ``missing_value`` is None. + """ + if dtype is None: + raise DTypeNotSpecified(termname=termname) + + try: + dtype = dtype_class(dtype) + except TypeError as exc: + raise NotDType(dtype=dtype, termname=termname) from exc + + if not can_represent_dtype(dtype): + raise UnsupportedDType(dtype=dtype, termname=termname) + + if missing_value is None: + missing_value = default_missing_value_for_dtype(dtype) + + try: + _coerce_to_dtype(missing_value, dtype) + except TypeError as exc: + raise TypeError( + "Missing value {value!r} is not a valid choice " + "for term {termname} with dtype {dtype}.\n\n" + "Coercion attempt failed with: {error}".format( + termname=termname, + value=missing_value, + dtype=dtype, + error=exc, + ) + ) from exc + + return dtype, missing_value + + +def _assert_valid_categorical_missing_value(value): + """ + Check that value is a valid categorical missing_value. + + Raises a TypeError if the value is cannot be used as the missing_value for + a categorical_dtype Term. + """ + label_types = LabelArray.SUPPORTED_SCALAR_TYPES + if not isinstance(value, label_types): + raise TypeError( + "String-dtype classifiers can only produce {types}.".format( + types=" or ".join([t.__name__ for t in label_types]) + ) + ) + + +def _coerce_to_dtype(value, dtype): + if dtype == categorical_dtype: + # This check is necessary because we use object dtype for + # categoricals, and numpy will allow us to promote numerical + # values to object even though we don't support them. + _assert_valid_categorical_missing_value(value) + return value + else: + # For any other type, cast using the same rules as numpy's astype + # function with casting='same_kind'. + # + # 'same_kind' allows casting between things like float32 and float64, + # but not between str and int. 
Note that the name is somewhat + # misleading, since it does allow conversion between different dtype + # kinds in some cases. In particular, conversion from int to float is + # allowed. + return array([value]).astype(dtype=dtype, casting="same_kind")[0] diff --git a/ziplime/pipeline/visualize.py b/ziplime/pipeline/visualize.py index fdab363f07..a1671e4415 100644 --- a/ziplime/pipeline/visualize.py +++ b/ziplime/pipeline/visualize.py @@ -12,7 +12,7 @@ from ziplime.pipeline.data import BoundColumn from ziplime.pipeline import Filter, Factor, Classifier, Term -from ziplime.pipeline.term import AssetExists +from ziplime.pipeline.terms.asset_exists import AssetExists class NoIPython(Exception): diff --git a/ziplime/sources/benchmark_source.py b/ziplime/sources/benchmark_source.py index 7b18d49d2b..30f7be6230 100644 --- a/ziplime/sources/benchmark_source.py +++ b/ziplime/sources/benchmark_source.py @@ -5,7 +5,6 @@ from ziplime.assets.entities.asset import Asset from ziplime.assets.services.asset_service import AssetService -from ziplime.data.domain.data_bundle import DataBundle from ziplime.errors import ( InvalidBenchmarkAsset, BenchmarkAssetNotAvailableTooEarly, @@ -38,7 +37,6 @@ def __init__( self._precalculated_series = pl.Series() elif benchmark_asset is not None: - self._validate_benchmark(benchmark_asset=benchmark_asset) self._precalculated_series = self._initialize_precalculated_series( asset=benchmark_asset, trading_calendar=trading_calendar, trading_days=sessions, exchange=exchange @@ -138,23 +136,23 @@ def daily_returns(self, start: datetime.datetime, end: datetime.datetime | None return daily_returns.filter(pl.col("date").is_between(start, end)) - def _validate_benchmark(self, benchmark_asset: Asset): + async def validate_benchmark(self, benchmark_asset: Asset): # check if this security has a stock dividend. if so, raise an # error suggesting that the user pick a different asset to use # as benchmark. 
- stock_dividends = self.asset_service.get_stock_dividends( - sid=self.benchmark_asset.sid, trading_days=self.sessions + stock_dividends = await self.asset_service.get_stock_dividends( + sid=benchmark_asset.sid, trading_days=self.sessions ) if len(stock_dividends) > 0: raise InvalidBenchmarkAsset( - sid=str(self.benchmark_asset), dt=stock_dividends[0]["ex_date"] + sid=str(benchmark_asset), dt=stock_dividends[0]["ex_date"] ) if benchmark_asset.start_date > self.sessions[0]: # the asset started trading after the first simulation day raise BenchmarkAssetNotAvailableTooEarly( - sid=str(self.benchmark_asset), + sid=str(benchmark_asset), dt=self.sessions[0], start_dt=benchmark_asset.start_date, ) @@ -162,7 +160,7 @@ def _validate_benchmark(self, benchmark_asset: Asset): if benchmark_asset.end_date < self.sessions[-1]: # the asset stopped trading before the last simulation day raise BenchmarkAssetNotAvailableTooLate( - sid=str(self.benchmark_asset), + sid=str(benchmark_asset), dt=self.sessions[-1], end_dt=benchmark_asset.end_date, ) diff --git a/ziplime/trading/trading_algorithm.py b/ziplime/trading/trading_algorithm.py index 6706c76ccf..241ca2b05f 100644 --- a/ziplime/trading/trading_algorithm.py +++ b/ziplime/trading/trading_algorithm.py @@ -44,6 +44,7 @@ from ziplime.trading.enums.order_side import OrderSide from ziplime.trading.enums.order_type import OrderType from ziplime.trading.enums.simulation_event import SimulationEvent +from ziplime.trading.trading_algorithm_execution_result import TradingAlgorithmExecutionResult from ziplime.trading.trading_signal_executor import TradingSignalExecutor from ziplime.utils.calendar_utils import get_calendar @@ -202,6 +203,7 @@ def __init__( stop_on_error: bool = False ): self.algorithm = algorithm + self.config = algorithm.config self.exchanges = exchanges self.stop_on_error = stop_on_error self.default_exchange = list(self.exchanges.values())[0] @@ -1953,7 +1955,7 @@ def default_pipeline_domain(calendar): This will be used to 
infer a domain for pipelines that only use generic datasets when running in the context of a TradingAlgorithm. """ - return _DEFAULT_DOMAINS.get(calendar.name, domain.GENERIC) + return domain.GENERIC ################## # End Pipeline API @@ -2034,7 +2036,7 @@ async def every_bar( for new_order in new_orders.values(): self._ledger.process_order(order=new_order) - def once_a_day( + async def once_a_day( self, midnight_dt, current_data, @@ -2052,7 +2054,7 @@ def once_a_day( # self.datetime = midnight_dt # self.on_dt_changed(midnight_dt) - self.metrics_tracker.handle_market_open(session_label=midnight_dt) + await self.metrics_tracker.handle_market_open(session_label=midnight_dt) # handle any splits that impact any positions or any open orders. assets_we_care_about = ( @@ -2060,7 +2062,7 @@ def once_a_day( ) if assets_we_care_about: - splits = asset_service.get_splits(assets_we_care_about, midnight_dt) + splits = await asset_service.get_splits(assets_we_care_about, midnight_dt) if splits: self.blotter.process_splits(splits) self._ledger.process_splits(splits) @@ -2113,7 +2115,7 @@ async def transform(self): handle_data=self.event_manager.handle_data): yield capital_change_packet, [] elif action == SimulationEvent.SESSION_START: - for capital_change_packet in self.once_a_day(midnight_dt=dt, + async for capital_change_packet in self.once_a_day(midnight_dt=dt, current_data=self.current_data, asset_service=self.asset_service): yield capital_change_packet, [] @@ -2231,7 +2233,3 @@ def _get_minute_message(self, dt: datetime.datetime): minute_message["minute_perf"]["recorded_vars"] = rvars return minute_message - - -# Map from calendar name to default domain for that calendar. 
-_DEFAULT_DOMAINS = {d.calendar_name: d for d in domain.BUILT_IN_DOMAINS} diff --git a/ziplime/utils/pandas_utils.py b/ziplime/utils/pandas_utils.py index 4e0c87b9ae..d1ededb20c 100644 --- a/ziplime/utils/pandas_utils.py +++ b/ziplime/utils/pandas_utils.py @@ -10,7 +10,7 @@ import pandas as pd -def explode(df): +def explode(df: pd.DataFrame): """Take a DataFrame and return a triple of (df.index, df.columns, df.values) @@ -86,7 +86,7 @@ def ignore_pandas_nan_categorical_warning(): yield -def categorical_df_concat(df_list, inplace=False): +def categorical_df_concat(df_list: list[pd.DataFrame], inplace: bool=False): """Prepare list of pandas DataFrames to be used as input to pd.concat. Ensure any columns of type 'category' have the same categories across each dataframe. diff --git a/ziplime/utils/run_algo.py b/ziplime/utils/run_algo.py index 4989db64ed..4ea338894a 100644 --- a/ziplime/utils/run_algo.py +++ b/ziplime/utils/run_algo.py @@ -11,6 +11,7 @@ from ziplime.finance.blotter.in_memory_blotter import InMemoryBlotter from ziplime.gens.domain.trading_clock import TradingClock +from ziplime.pipeline.loaders import EquityPricingLoader from ziplime.sources.benchmark_source import BenchmarkSource import polars as pl @@ -23,8 +24,7 @@ except ImportError: PYGMENTS = False -from ziplime.pipeline.data import USEquityPricing -from ziplime.pipeline.loaders import USEquityPricingLoader +from ziplime.pipeline.data.equity_pricing import EquityPricing from ziplime.trading.trading_algorithm import TradingAlgorithm @@ -60,10 +60,12 @@ async def run_algorithm( else: logger.info(f"\n{algorithm.algorithm_text}") exchanges_dict = {exchange.name: exchange for exchange in exchanges} - pipeline_loader = USEquityPricingLoader.without_fx(None) # TODO: fix pipeline + pipeline_loader = EquityPricingLoader.without_fx(data_source=None, + asset_service=asset_service + ) # TODO: fix pipeline def choose_loader(column): - if column in USEquityPricing.columns: + if column in EquityPricing.columns: 
return pipeline_loader try: return custom_loader.get(column) @@ -93,6 +95,7 @@ def choose_loader(column): emission_rate=clock.emission_rate, benchmark_fields=frozenset({"close"}) ) + await benchmark_source.validate_benchmark(benchmark_asset=benchmark_asset) tr = TradingAlgorithm( exchanges=exchanges_dict, @@ -100,9 +103,7 @@ def choose_loader(column): get_pipeline_loader=choose_loader, metrics_set=metrics_set, blotter=InMemoryBlotter(exchanges=exchanges_dict, cancel_policy=None), - # benchmark_source=get_benchmark(clock=clock), benchmark_source=benchmark_source, - # benchmark_source=None, algorithm=algorithm, clock=clock, stop_on_error=stop_on_error,